diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26040bb1..2b21a539 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ name: "Build and check HTML" jobs: build: name: Build site and generate HTML - runs-on: ubuntu-latest + runs-on: nscloud-ubuntu-22.04-amd64-8x16 steps: - name: Install deps for figures (OS packages) @@ -25,9 +25,9 @@ jobs: sudo apt update && sudo apt install -y poppler-utils - name: Install deps for figures (TeX) - uses: teatimeguest/setup-texlive-action@v3 + uses: zauguin/install-texlive@v4 with: - version: 2024 + texlive_version: 2024 packages: | scheme-small latex-bin diff --git a/.github/workflows/merge-main-nightly.yml b/.github/workflows/merge-main-nightly.yml index 06e9e4e1..14188b6b 100644 --- a/.github/workflows/merge-main-nightly.yml +++ b/.github/workflows/merge-main-nightly.yml @@ -14,7 +14,7 @@ env: jobs: merge-to-nightly: - runs-on: ubuntu-latest + runs-on: nscloud-ubuntu-22.04-amd64-8x16 if: github.repository == 'leanprover/reference-manual' steps: - name: Checkout repository @@ -51,9 +51,9 @@ jobs: sudo apt update && sudo apt install -y poppler-utils - name: Install deps for figures (TeX) - uses: teatimeguest/setup-texlive-action@v3 + uses: zauguin/install-texlive@v4 with: - version: 2024 + texlive_version: 2024 packages: | scheme-small latex-bin diff --git a/.github/workflows/pr-testing.yml b/.github/workflows/pr-testing.yml index 1448ac06..c5277a8b 100644 --- a/.github/workflows/pr-testing.yml +++ b/.github/workflows/pr-testing.yml @@ -9,7 +9,7 @@ name: "Report PR testing status to the lean4 repository" jobs: build: name: Build site and generate HTML - runs-on: ubuntu-latest + runs-on: nscloud-ubuntu-22.04-amd64-8x16 steps: - name: Install deps for figures (OS packages) @@ -17,9 +17,9 @@ jobs: sudo apt update && sudo apt install -y poppler-utils - name: Install deps for figures (TeX) - uses: teatimeguest/setup-texlive-action@v3 + uses: zauguin/install-texlive@v4 with: - 
version: 2024 + texlive_version: 2024 packages: | scheme-small latex-bin diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index 51899ea5..42750448 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -41,9 +41,9 @@ jobs: ./deploy/prep.sh - name: Install deps for figures (TeX) - uses: teatimeguest/setup-texlive-action@v3 + uses: zauguin/install-texlive@v4 with: - version: 2024 + texlive_version: 2024 packages: | scheme-small latex-bin diff --git a/.github/workflows/update-nightly.yml b/.github/workflows/update-nightly.yml index 553958b1..293f97be 100644 --- a/.github/workflows/update-nightly.yml +++ b/.github/workflows/update-nightly.yml @@ -14,7 +14,7 @@ jobs: # This job checks whether there's been a new nightly since the last # successful automatic update check-update: - runs-on: ubuntu-latest + runs-on: nscloud-ubuntu-22.04-amd64-8x16 if: github.repository == 'leanprover/reference-manual' outputs: update-needed: ${{ steps.check-update.outputs.update-needed }} @@ -96,9 +96,9 @@ jobs: sudo apt update && sudo apt install -y poppler-utils - name: Install deps for figures (TeX) - uses: teatimeguest/setup-texlive-action@v3 + uses: zauguin/install-texlive@v4 with: - version: 2024 + texlive_version: 2024 packages: | scheme-small latex-bin diff --git a/.vale/scripts/rewrite_html.py b/.vale/scripts/rewrite_html.py index 4639b910..c6d29718 100644 --- a/.vale/scripts/rewrite_html.py +++ b/.vale/scripts/rewrite_html.py @@ -30,6 +30,13 @@ def process_html_file(filepath, output_filepath): elif code_tag.attrs and 'class' in code_tag.attrs and 'hl' in code_tag['class'] and 'lean' in code_tag['class']: code_tag.decompose() + # Delete all content in error explanation pages. This comes from the lean4 repo + # and shouldn't be linted here. 
+ for element in soup.find_all(class_='error-example-container'): + in_sections = element.find_parents('section') + if in_sections: + in_sections[-1].decompose() + # Delete docstring content (for now) for element in soup.find_all(class_="namedocs"): element.decompose() diff --git a/.vale/styles/Lean/Names.yml b/.vale/styles/Lean/Names.yml index fda660d9..4bec7697 100644 --- a/.vale/styles/Lean/Names.yml +++ b/.vale/styles/Lean/Names.yml @@ -1,7 +1,9 @@ +# This file suggests correct capitalizations / accents for names. extends: substitution message: Use '%s' instead of '%s'. level: error ignorecase: true +# In this list, the key is case-insensitive, and the value should contain the correct case. swap: - 'de moura': 'de Moura' - 'de bruijn': 'de Bruijn' @@ -9,7 +11,7 @@ swap: - 'carneiro': 'Carneiro' - 'collatz': 'Collatz' - 'lua': 'Lua' - - 'Madelaine': 'Madelaine' + - 'madelaine': 'Madelaine' - 'mathlib': 'Mathlib' - 'merkin': 'Merkin' - 'peano': 'Peano' @@ -17,4 +19,5 @@ swap: - 'simons': 'Simons' - 'ullrich': 'Ullrich' - 'wadler': 'Wadler' + - 'grobner': 'Gröbner' diff --git a/.vale/styles/config/ignore/names.txt b/.vale/styles/config/ignore/names.txt index 873e369e..f6f56e5f 100644 --- a/.vale/styles/config/ignore/names.txt +++ b/.vale/styles/config/ignore/names.txt @@ -14,3 +14,9 @@ Streicher Streicher's Ullrich Wadler +Wojciech +Nawrocki +Nawrocki's +Rustan +Leino +Leino's diff --git a/.vale/styles/config/ignore/terms.txt b/.vale/styles/config/ignore/terms.txt index e07080de..b6ad9606 100644 --- a/.vale/styles/config/ignore/terms.txt +++ b/.vale/styles/config/ignore/terms.txt @@ -24,6 +24,7 @@ constructorless conv cumulative cumulativity +cutsat deallocate deallocated deallocates @@ -195,3 +196,4 @@ unparenthesized uploader upvote walkthrough +zulip diff --git a/ExtractExplanationExamples.lean b/ExtractExplanationExamples.lean new file mode 100644 index 00000000..f53c5b43 --- /dev/null +++ b/ExtractExplanationExamples.lean @@ -0,0 +1,135 @@ +/- +Copyright (c) 
2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author: Joseph Rotella +-/ + +import Lean.ErrorExplanations +import SubVerso.Highlighting + +/-! +Tool for extracting rendering data from a batch of error-explanation MWEs with +identical imports. We use this in a preprocessing step rather than during the +manual's elaboration so that we can group MWEs with the same imports, which +avoids the sizable performance overhead of reloading the environment for each +example. +-/ + +open Lean Meta Elab Term SubVerso Highlighting + +structure ExtractedExample where + highlighted : Highlighted + messages : Array (MessageSeverity × String) + hash : UInt64 + version : String + deriving ToJson, FromJson + +/-- Returns the result of processing this example as well as the environment and message log +produced *only* by the header-processing step (which is taken to be `envWithMsgs?` if it's supplied) -/ +def processMWE (input : String) (inputHash : UInt64) (envWithMsgs? 
: Option (Environment × MessageLog)) : + IO (ExtractedExample × Environment × MessageLog) := do + let fileName := "Main.lean" + let inputCtx := Parser.mkInputContext input fileName + let (hdrStx, s, msgs) ← Parser.parseHeader inputCtx + let (env, msgs') ← envWithMsgs?.getDM <| processHeader hdrStx {} {} inputCtx + let msgs := msgs ++ msgs' + let cmdState := Command.mkState env msgs + + -- If header processing failed, don't try to elaborate the body; however, we + -- must still parse it for the syntax highlighter + let shouldElab := !msgs.hasErrors + let mut (cmdState, stxs) ← processCommands inputCtx s cmdState shouldElab + stxs := #[hdrStx] ++ stxs + let nonSilentMsgs := cmdState.messages.toArray.filter (!·.isSilent) + let hls ← mkHighlights cmdState nonSilentMsgs inputCtx stxs + let msgs ← mkMessages nonSilentMsgs + let ex := { + highlighted := hls + messages := msgs + hash := inputHash + version := Lean.versionString + } + return (ex, env, msgs') +where + processCommands (inputCtx : Parser.InputContext) (s : Parser.ModuleParserState) (cmdState : Command.State) (doElab : Bool) := do + let mut s := s + let mut cmdState := cmdState + let mut stxs := #[] + repeat + let scope := cmdState.scopes.head! 
+ let pmctx : Parser.ParserModuleContext := { + env := cmdState.env, + options := scope.opts, + currNamespace := scope.currNamespace, + openDecls := scope.openDecls + } + let (stx, s', msgs') := Parser.parseCommand inputCtx pmctx s cmdState.messages + s := s' + cmdState := {cmdState with messages := msgs'} + stxs := stxs.push stx + if doElab then + (_, cmdState) ← runCommandElabM (Command.elabCommandTopLevel stx) inputCtx cmdState s + if Parser.isTerminalCommand stx then + break + return (cmdState, stxs) + + withNewline (str : String) := + if str == "" || str.back != '\n' then str ++ "\n" else str + + mkHighlights (cmdState : Command.State) (nonSilentMsgs : Array Message) + (inputCtx : Parser.InputContext) (cmds : Array Syntax) : + IO Highlighted := + let termElab : TermElabM Highlighted := do + let mut hls := Highlighted.empty + let mut lastPos : String.Pos := 0 + for cmd in cmds do + let hl ← highlightIncludingUnparsed cmd nonSilentMsgs cmdState.infoState.trees [] lastPos + hls := hls ++ hl + lastPos := (cmd.getTrailingTailPos?).getD lastPos + return hls + Prod.fst <$> runCommandElabM (Command.liftTermElabM termElab) inputCtx cmdState + + mkMessages (nonSilentMsgs : Array Message) := do + nonSilentMsgs.mapM fun msg => do + let head := if msg.caption != "" then msg.caption ++ ":\n" else "" + let txt := withNewline <| head ++ (← msg.data.toString) + pure (msg.severity, txt) + + runCommandElabM {α} (x : Command.CommandElabM α) (ictx : Parser.InputContext) + (cmdState : Command.State) (s? : Option Parser.ModuleParserState := none) : + IO (α × Command.State) := do + let ctx : Command.Context := { + cmdPos := s?.map (·.pos) |>.getD 0 + fileName := ictx.fileName + fileMap := ictx.fileMap + snap? := none + cancelTk? 
:= none + } + let eio := x.run ctx |>.run cmdState + match (← eio.toIO') with + | .ok res => return res + | .error e => throw <| IO.userError (← e.toMessageData.toString) + +def writeEx (outDir : System.FilePath) (id : String) (json : String) : IO Unit := do + if ! (← System.FilePath.pathExists outDir) then + IO.FS.createDir outDir + let path := outDir / (id ++ ".json") + IO.FS.writeFile path json + +unsafe def main (args : List String) : IO UInt32 := do + initSearchPath (← findSysroot) + enableInitializersExecution + let outDir :: inFiles := args | + throw <| IO.userError "Usage: extract_explanation_examples \n\ + where all input files have the same imports" + let mut envWithMsgs? : Option (Environment × MessageLog) := none + for file in inFiles do + let input ← IO.FS.readFile file + let inputHash := hash input + let some exampleName := (file : System.FilePath).fileStem | + throw <| IO.userError s!"Malformed file path: {file}" + let (ex, env, msgs) ← processMWE input inputHash envWithMsgs? + envWithMsgs? 
:= some (env, msgs) + let json := (toJson ex).compress + writeEx outDir exampleName json + return 0 diff --git a/Main.lean b/Main.lean index 468a1ca1..d1b866cb 100644 --- a/Main.lean +++ b/Main.lean @@ -15,14 +15,11 @@ def searchModule := {{ }} - open Verso.Output.Html in def plausible := {{ }} - - def fuzzysortLicense : LicenseInfo where identifier := "MIT" dependency := "fuzzysort v3.1.0" @@ -81,13 +78,14 @@ def scarfPixel := {{ def main := manualMain (%doc Manual) (config := config) where - config := Config.addKaTeX { + config := Config.addSearch <| Config.addKaTeX { extraFiles := [("static", "static")], extraCss := [ "/static/colors.css", "/static/theme.css", "/static/print.css", "/static/search/search-box.css", + "/static/search/search-highlight.css", "/static/fonts/source-serif/source-serif-text.css", "/static/fonts/source-code-pro/source-code-pro.css", "/static/fonts/source-sans/source-sans-3.css", @@ -95,9 +93,10 @@ where ], extraJs := [ -- Search box - "/static/search/fuzzysort.js", + {filename := "/static/search/fuzzysort.js"}, + {filename := "/static/search/search-highlight.js", after := #["searchIndex.js"], defer := true}, -- Print stylesheet improvements - "/static/print.js" + {filename := "/static/print.js"} ], extraHead := #[searchModule, plausible], extraContents := #[scarfPixel], diff --git a/Manual.lean b/Manual.lean index 59bb84c9..93836e4c 100644 --- a/Manual.lean +++ b/Manual.lean @@ -14,8 +14,10 @@ import Manual.Defs import Manual.Classes import Manual.Axioms import Manual.Terms +import Manual.ErrorExplanations import Manual.Tactics import Manual.Simp +import Manual.Grind import Manual.BasicTypes import Manual.BasicProps import Manual.NotationsMacros @@ -26,6 +28,7 @@ import Manual.BuildTools import Manual.Releases import Manual.Namespaces import Manual.Runtime +import Manual.ModuleSystem open Verso.Genre Manual open Verso.Genre.Manual.InlineLean @@ -36,6 +39,7 @@ set_option maxRecDepth 1024 #doc (Manual) "The Lean Language Reference" => %%% 
tag := "lean-language-reference" +shortContextTitle := "Lean Reference" %%% This is the _Lean Language Reference_. @@ -92,6 +96,8 @@ Thus, this reference manual does not draw a barrier between the two aspects, but {include 0 Manual.Simp} +{include 0 Manual.Grind} + {include 0 Manual.BasicProps} {include 0 Manual.BasicTypes} @@ -129,6 +135,10 @@ Overview of the standard library, including types from the prelude and those tha {include 0 Manual.BuildTools} +{include 0 Manual.ErrorExplanations} + +{include 0 Manual.ModuleSystem} + {include 0 Manual.Releases} # Index diff --git a/Manual/Axioms.lean b/Manual/Axioms.lean index c855d8a2..7a8b0685 100644 --- a/Manual/Axioms.lean +++ b/Manual/Axioms.lean @@ -147,7 +147,7 @@ partial def List.length' : List α → Nat | _ :: xs => xs.length' + 1 ``` ```leanOutput otherZero2 -failed to compile definition, consider marking it as 'noncomputable' because it depends on 'Nat.otherZero', and it does not have executable code +axiom 'Nat.otherZero' not supported by code generator; consider marking definition as 'noncomputable' ``` Axioms used in proofs rather than programs do not prevent a function from being compiled. diff --git a/Manual/BasicProps.lean b/Manual/BasicProps.lean index 6378659c..f409c5a2 100644 --- a/Manual/BasicProps.lean +++ b/Manual/BasicProps.lean @@ -418,9 +418,9 @@ end :::::leanSection ::::example "Heterogeneous Equality" -````lean (show := false) +```lean (show := false) variable {α : Type u} {n k l₁ l₂ l₃ : Nat} -```` +``` The type {lean}`Vector α n` is wrapper around an {lean}`Array α` that includes a proof that the array has size {lean}`n`. Appending {name}`Vector`s is associative, but this fact cannot be straightforwardly stated using ordinary propositional equality: diff --git a/Manual/BasicTypes/Array/FFI.lean b/Manual/BasicTypes/Array/FFI.lean index 0a957ce9..9eb39f88 100644 --- a/Manual/BasicTypes/Array/FFI.lean +++ b/Manual/BasicTypes/Array/FFI.lean @@ -33,17 +33,17 @@ The representation of arrays in C. 
See {ref "array-runtime"}[the description of ::: :::ffi "lean_is_array" -```` +``` bool lean_is_array(lean_object * o) -```` +``` Returns `true` if `o` is an array, or `false` otherwise. ::: :::ffi "lean_to_array" -```` +``` lean_array_object * lean_to_array(lean_object * o) -```` +``` Performs a runtime check that `o` is indeed an array. If `o` is not an array, an assertion fails. ::: diff --git a/Manual/BasicTypes/BitVec.lean b/Manual/BasicTypes/BitVec.lean index a6eba5a5..93af1445 100644 --- a/Manual/BasicTypes/BitVec.lean +++ b/Manual/BasicTypes/BitVec.lean @@ -297,13 +297,13 @@ These operations treat bitvectors as sequences of bits, rather than as encodings {docstring BitVec.getMsbD} -{docstring BitVec.getMsb'} +{docstring BitVec.getMsb} {docstring BitVec.getMsb?} {docstring BitVec.getLsbD} -{docstring BitVec.getLsb'} +{docstring BitVec.getLsb} {docstring BitVec.getLsb?} diff --git a/Manual/BasicTypes/Char.lean b/Manual/BasicTypes/Char.lean index 78c3cb20..c114cf59 100644 --- a/Manual/BasicTypes/Char.lean +++ b/Manual/BasicTypes/Char.lean @@ -44,7 +44,7 @@ tag := "char-runtime" As a {ref "inductive-types-trivial-wrappers"}[trivial wrapper], characters are represented identically to {lean}`UInt32`. In particular, characters are represented as 32-bit immediate values in monomorphic contexts. In other words, a field of a constructor or structure of type {lean}`Char` does not require indirection to access. -In polymorphic contexts, characters are boxed. +In polymorphic contexts, characters are {tech}[boxed]. # Syntax diff --git a/Manual/BasicTypes/Fin.lean b/Manual/BasicTypes/Fin.lean index 3c50e3e5..469c9b87 100644 --- a/Manual/BasicTypes/Fin.lean +++ b/Manual/BasicTypes/Fin.lean @@ -102,7 +102,7 @@ numerals are polymorphic in Lean, but the numeral `0` cannot be used in a contex Fin 0 due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. 
+Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` ```lean (error := true) (name := finK) @@ -115,7 +115,7 @@ numerals are polymorphic in Lean, but the numeral `0` cannot be used in a contex Fin k due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` ::: diff --git a/Manual/BasicTypes/Int.lean b/Manual/BasicTypes/Int.lean index 2cc95871..ef7141dc 100644 --- a/Manual/BasicTypes/Int.lean +++ b/Manual/BasicTypes/Int.lean @@ -25,7 +25,7 @@ Integers are specially supported by Lean's implementation. The logical model of the integers is based on the natural numbers: each integer is modeled as either a natural number or the negative successor of a natural number. Operations on the integers are specified using this model, which is used in the kernel and in interpreted code. In these contexts, integer code inherits the performance benefits of the natural numbers' special support. -In compiled code, integers are represented as efficient arbitrary-precision integers, and sufficiently small numbers are stored as unboxed values that don't require indirection through a pointer. +In compiled code, integers are represented as efficient arbitrary-precision integers, and sufficiently small numbers are stored as values that don't require indirection through a pointer. Arithmetic operations are implemented by primitives that take advantage of the efficient representations. # Logical Model @@ -46,7 +46,7 @@ Integers can also be represented as a pair of natural numbers in which one is su tag := "int-runtime" %%% -Like {ref "nat-runtime"}[natural numbers], sufficiently-small integers are represented as unboxed values: the lowest-order bit in an object pointer is used to indicate that the value is not, in fact, a pointer. 
+Like {ref "nat-runtime"}[natural numbers], sufficiently-small integers are represented without pointers: the lowest-order bit in an object pointer is used to indicate that the value is not, in fact, a pointer. If an integer is too large to fit in the remaining bits, it is instead allocated as an ordinary Lean object that consists of an object header and an arbitrary-precision integer. # Syntax diff --git a/Manual/BasicTypes/Maps.lean b/Manual/BasicTypes/Maps.lean index e0052c9b..08de6a80 100644 --- a/Manual/BasicTypes/Maps.lean +++ b/Manual/BasicTypes/Maps.lean @@ -195,7 +195,12 @@ structure Maze where This definition is rejected: ```leanOutput badNesting -(kernel) arg #1 of '_nested.Std.HashMap_1.mk' contains a non valid occurrence of the datatypes being declared +(kernel) application type mismatch + DHashMap.Raw.WF inner +argument has type + _nested.Std.DHashMap.Raw_3 +but function has type + (DHashMap.Raw String fun x => Maze) → Prop ``` Making this work requires separating the well-formedness predicates from the structure. diff --git a/Manual/BasicTypes/Maps/TreeMap.lean b/Manual/BasicTypes/Maps/TreeMap.lean index ac1fbfeb..18b2689d 100644 --- a/Manual/BasicTypes/Maps/TreeMap.lean +++ b/Manual/BasicTypes/Maps/TreeMap.lean @@ -109,10 +109,6 @@ The declarations in this section should be imported using `import Std.TreeMap`. {docstring Std.TreeMap.getEntryLTD} -{docstring Std.TreeMap.getGE} - -{docstring Std.TreeMap.getGT} - {docstring Std.TreeMap.getKeyGE} {docstring Std.TreeMap.getKeyGE!} @@ -145,10 +141,6 @@ The declarations in this section should be imported using `import Std.TreeMap`. 
{docstring Std.TreeMap.getKeyLTD} -{docstring Std.TreeMap.getLE} - -{docstring Std.TreeMap.getLT} - {docstring Std.TreeMap.keyAtIdx} {docstring Std.TreeMap.keyAtIdx!} diff --git a/Manual/BasicTypes/Maps/TreeSet.lean b/Manual/BasicTypes/Maps/TreeSet.lean index 7511f6ef..83ceab1c 100644 --- a/Manual/BasicTypes/Maps/TreeSet.lean +++ b/Manual/BasicTypes/Maps/TreeSet.lean @@ -56,24 +56,32 @@ tag := "TreeSet" {docstring Std.TreeSet.atIdxD} +{docstring Std.TreeSet.getGE} + {docstring Std.TreeSet.getGE!} {docstring Std.TreeSet.getGE?} {docstring Std.TreeSet.getGED} +{docstring Std.TreeSet.getGT} + {docstring Std.TreeSet.getGT!} {docstring Std.TreeSet.getGT?} {docstring Std.TreeSet.getGTD} +{docstring Std.TreeSet.getLE} + {docstring Std.TreeSet.getLE!} {docstring Std.TreeSet.getLE?} {docstring Std.TreeSet.getLED} +{docstring Std.TreeSet.getLT} + {docstring Std.TreeSet.getLT!} {docstring Std.TreeSet.getLT?} diff --git a/Manual/BasicTypes/Nat.lean b/Manual/BasicTypes/Nat.lean index ff78fef5..60126ecc 100644 --- a/Manual/BasicTypes/Nat.lean +++ b/Manual/BasicTypes/Nat.lean @@ -22,7 +22,7 @@ The {deftech}[natural numbers] are nonnegative integers. Logically, they are the numbers 0, 1, 2, 3, …, generated from the constructors {lean}`Nat.zero` and {lean}`Nat.succ`. Lean imposes no upper bound on the representation of natural numbers other than physical constraints imposed by the available memory of the computer. -Because the natural numbers are fundamental to both mathematical reasoning and programming, they are specially supported by Lean's implementation. The logical model of the natural numbers is as an {tech}[inductive type], and arithmetic operations are specified using this model. In Lean's kernel, the interpreter, and compiled code, closed natural numbers are represented as efficient arbitrary-precision integers. Sufficiently small numbers are unboxed values that don't require indirection through a pointer. 
Arithmetic operations are implemented by primitives that take advantage of the efficient representations. +Because the natural numbers are fundamental to both mathematical reasoning and programming, they are specially supported by Lean's implementation. The logical model of the natural numbers is as an {tech}[inductive type], and arithmetic operations are specified using this model. In Lean's kernel, the interpreter, and compiled code, closed natural numbers are represented as efficient arbitrary-precision integers. Sufficiently small numbers are values that don't require indirection through a pointer. Arithmetic operations are implemented by primitives that take advantage of the efficient representations. # Logical Model %%% @@ -107,10 +107,10 @@ In the kernel, there are special `Nat` literal values that use a widely-trusted, Basic functions such as addition are overridden by primitives that use this representation. Because they are part of the kernel, if these primitives did not correspond to their definitions as Lean functions, it could undermine soundness. -In compiled code, sufficiently-small natural numbers are represented as unboxed values: the lowest-order bit in an object pointer is used to indicate that the value is not, in fact, a pointer, and the remaining bits are used to store the number. -31 bits are available on 32-bits architectures for unboxed {lean}`Nat`s, while 63 bits are available on 64-bit architectures. +In compiled code, sufficiently-small natural numbers are represented without pointer indirections: the lowest-order bit in an object pointer is used to indicate that the value is not, in fact, a pointer, and the remaining bits are used to store the number. +31 bits are available on 32-bits architectures for pointer-free {lean}`Nat`s, while 63 bits are available on 64-bit architectures. In other words, natural numbers smaller than $`2^{31} = 2,147,483,648` or $`2^{63} = 9,223,372,036,854,775,808` do not require allocations. 
-If an natural number is too large for the unboxed representation, it is instead allocated as an ordinary Lean object that consists of an object header and an arbitrary-precision integer value. +If a natural number is too large for this representation, it is instead allocated as an ordinary Lean object that consists of an object header and an arbitrary-precision integer value. ## Performance Notes %%% diff --git a/Manual/BasicTypes/String/FFI.lean b/Manual/BasicTypes/String/FFI.lean index c778df22..54e99dc7 100644 --- a/Manual/BasicTypes/String/FFI.lean +++ b/Manual/BasicTypes/String/FFI.lean @@ -37,17 +37,17 @@ The representation of strings in C. See {ref "string-runtime"}[the description o ::: :::ffi "lean_is_string" -```` +``` bool lean_is_string(lean_object * o) -```` +``` Returns `true` if `o` is a string, or `false` otherwise. ::: :::ffi "lean_to_string" -```` +``` lean_string_object * lean_to_string(lean_object * o) -```` +``` Performs a runtime check that `o` is indeed a string. If `o` is not a string, an assertion fails. ::: diff --git a/Manual/BasicTypes/Sum.lean b/Manual/BasicTypes/Sum.lean index 7daaac20..99a655d3 100644 --- a/Manual/BasicTypes/Sum.lean +++ b/Manual/BasicTypes/Sum.lean @@ -152,7 +152,7 @@ example : Nat ⊕ String := panic! "Cant' find it" failed to synthesize Inhabited (Nat ⊕ String) -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. 
``` The desired instance can be made available to instance synthesis using {keywordOf Lean.Parser.Term.have}`have`: diff --git a/Manual/BasicTypes/UInt.lean b/Manual/BasicTypes/UInt.lean index d2dbb414..70b6db73 100644 --- a/Manual/BasicTypes/UInt.lean +++ b/Manual/BasicTypes/UInt.lean @@ -60,13 +60,13 @@ Signed integers wrap the corresponding unsigned integers, and use a twos-complem tag := "fixed-int-runtime" %%% -In compiled code, fixed-width integer types that fit in one less bit than the platform's pointer size are always represented unboxed, without additional allocations or indirections. +In compiled code in contexts that require {tech}[boxed] representations, fixed-width integer types that fit in one less bit than the platform's pointer size are always represented without additional allocations or indirections. This always includes {lean}`Int8`, {lean}`UInt8`, {lean}`Int16`, and {lean}`UInt16`. -On 64-bit architectures, {lean}`Int32` and {lean}`UInt32` are also unboxed. -On 32-bit architectures, {lean}`Int32` and {lean}`UInt32` are boxed, which means they may be represented by a pointer to an object on the heap. -{lean}`ISize`, {lean}`USize`, {lean}`Int64` and {lean}`UInt64` are boxed on all architectures. +On 64-bit architectures, {lean}`Int32` and {lean}`UInt32` are also represented without pointers. +On 32-bit architectures, {lean}`Int32` and {lean}`UInt32` require a pointer to an object on the heap. +{lean}`ISize`, {lean}`USize`, {lean}`Int64` and {lean}`UInt64` may require pointers on all architectures. -Even though some fixed-with integer types require boxing in general, the compiler is able to represent them without boxing in code paths that use only a specific fixed-width type rather than being polymorphic, potentially after a specialization pass. 
+Even though some fixed-width integer types require boxing in general, the compiler is able to represent them without boxing or pointer indirections in code paths that use only a specific fixed-width type rather than being polymorphic, potentially after a specialization pass. This applies in most practical situations where these types are used: their values are represented using the corresponding unsigned fixed-width C type when a constructor parameter, function parameter, function return value, or intermediate result is known to be a fixed-width integer type. The Lean run-time system includes primitives for storing fixed-width integers in constructors of {tech}[inductive types], and the primitive operations are defined on the corresponding C types, so boxing tends to happen at the “edges” of integer calculations rather than for each intermediate result. In contexts where other types might occur, such as the contents of polymorphic containers like {name}`Array`, these types are boxed, even if an array is statically known to contain only a single fixed-width integer type.{margin}[The monomorphic array type {lean}`ByteArray` avoids boxing for arrays of {lean}`UInt8`.] 
@@ -103,7 +103,7 @@ def Permissions.decode (i : UInt8) : Permissions := theorem Permissions.decode_encode (p : Permissions) : p = .decode (p.encode) := by let ⟨r, w, x⟩ := p cases r <;> cases w <;> cases x <;> - simp +decide [encode, decode] + simp +decide [decode] ``` ::: diff --git a/Manual/BuildTools.lean b/Manual/BuildTools.lean index 3165a6a5..360d0c90 100644 --- a/Manual/BuildTools.lean +++ b/Manual/BuildTools.lean @@ -24,6 +24,7 @@ open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Build Tools and Distribution" => %%% tag := "build-tools-and-distribution" +shortContextTitle := "Build Tools" %%% :::paragraph diff --git a/Manual/BuildTools/Elan.lean b/Manual/BuildTools/Elan.lean index d78e482f..45b50294 100644 --- a/Manual/BuildTools/Elan.lean +++ b/Manual/BuildTools/Elan.lean @@ -22,6 +22,7 @@ open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode #doc (Manual) "Managing Toolchains with Elan" => %%% tag := "elan" +shortContextTitle := "Elan" %%% Elan is the Lean toolchain manager. diff --git a/Manual/BuildTools/Lake.lean b/Manual/BuildTools/Lake.lean index d3b72928..9e3367cc 100644 --- a/Manual/BuildTools/Lake.lean +++ b/Manual/BuildTools/Lake.lean @@ -350,20 +350,44 @@ Executables have a single `exe` facet that consists of the executable binary. ```lean (show := false) -- Always keep this in sync with the description below. It ensures that the list is complete. 
/-- -info: #[`module.bc, `module.bc.o, `module.c, `module.c.o, `module.c.o.export, `module.c.o.noexport, `module.deps, - `module.dynlib, `module.ilean, `module.imports, `module.leanArts, `module.o, `module.o.export, `module.o.noexport, - `module.olean, `module.precompileImports, `module.transImports] +info: module.bc +module.bc.o +module.c +module.c.o +module.c.o.export +module.c.o.noexport +module.deps +module.dynlib +module.header +module.ilean +module.imports +module.input +module.lean +module.leanArts +module.o +module.o.export +module.o.noexport +module.olean +module.olean.private +module.olean.server +module.precompileImports +module.setup +module.transImports -/ #guard_msgs in -#eval Lake.initModuleFacetConfigs.toList.toArray.map (·.1) |>.qsort (·.toString < ·.toString) +#eval Lake.initModuleFacetConfigs.toList.toArray.map (·.1) |>.qsort (·.toString < ·.toString) |>.forM (IO.println) ``` :::paragraph The facets available for modules are: +: `lean` + + The module's Lean source file. + : `leanArts` (default) - The module's Lean artifacts (`*.olean`, `*.ilean`, `*.c` files) + The module's Lean artifacts (`*.olean`, `*.ilean`, `*.c` files). : `deps` @@ -371,11 +395,19 @@ The facets available for modules are: : `olean` - The module's {tech}[`.olean` file] + The module's {tech}[`.olean` file]. {TODO}[Once module system lands fully, add docs for `olean.private` and `olean.server`] : `ilean` - The module's `.ilean` file, which is metadata used by the Lean language server + The module's `.ilean` file, which is metadata used by the Lean language server. + +: `header` + + The parsed module header of the module's source file. + +: `input` + + The module's processed Lean source file. Combines tracing the file with parsing its header. : `imports` @@ -389,13 +421,22 @@ The facets available for modules are: The transitive imports of the Lean module, as {tech}[`.olean` files]. +: `allImports` + + Both the immediate and transitive imports of the Lean module. 
+ +: `setup` + + All of a module's dependencies: transitive local imports and shared libraries to be loaded with `--load-dynlib`. + Returns the list of shared libraries to load along with their search path. + : `c` - The C file produced by the Lean compiler + The C file produced by the Lean compiler. : `bc` - LLVM bitcode file, produced by the Lean compiler + LLVM bitcode file, produced by the Lean compiler. : `c.o` @@ -411,15 +452,15 @@ The facets available for modules are: : `bc.o` - The compiled object file, produced from the LLVM bitcode file + The compiled object file, produced from the LLVM bitcode file. : `o` - The compiled object file for the configured backend + The compiled object file for the configured backend. : `dynlib` - A shared library (e.g., for the Lean option `--load-dynlib`){TODO}[Document Lean command line options, and cross-reference from here] + A shared library (e.g., for the Lean option `--load-dynlib`){TODO}[Document Lean command line options, and cross-reference from here]. ::: @@ -438,7 +479,7 @@ Because they are Lean definitions, Lake scripts can only be defined in the Lean Restore the following once we can import enough of Lake to elaborate it -````` +```` ```lean (show := false) section open Lake DSL @@ -466,7 +507,7 @@ script "list-deps" := do ```lean (show := false) end ``` -````` +```` ::::: @@ -484,7 +525,7 @@ Lint drivers may be executables or scripts, which are run by {lake}`lint`. A test or lint driver can be configured by either setting the {tomlField Lake.PackageConfig}`testDriver` or {tomlField Lake.PackageConfig}`lintDriver` package configuration options or by tagging a script, executable, or library with the `test_driver` or `lint_driver` attribute in a Lean-format configuration file. A definition in a dependency can be used as a test or lint driver by using the `/` syntax for the appropriate configuration option. :::TODO -Restore the ``{attr}`` role for `test_driver` and `lint_driver` above. 
Right now, importing the attributes crashes the compiler. +Restore the `{attr}` role for `test_driver` and `lint_driver` above. Right now, importing the attributes crashes the compiler. ::: ## GitHub Release Builds diff --git a/Manual/BuildTools/Lake/CLI.lean b/Manual/BuildTools/Lake/CLI.lean index e032a1b4..d847c2c2 100644 --- a/Manual/BuildTools/Lake/CLI.lean +++ b/Manual/BuildTools/Lake/CLI.lean @@ -391,7 +391,8 @@ The initial configuration and starter files are based on the template: std library and executable; default exe executable only lib library only - math library only with a mathlib dependency + math-lax library only with a Mathlib dependency + math library with Mathlib standards for linting and workflows Templates can be suffixed with `.lean` or `.toml` to produce a Lean or TOML version of the configuration file, respectively. The default is TOML. @@ -918,7 +919,7 @@ Other hosts are not yet supported. ## Cached Cloud Builds -**These commands are still experimental.** +*These commands are still experimental.* They are likely change in future versions of Lake based on user feedback. Packages that use Reservoir cloud build archives should enable the {tomlField Lake.PackageConfig}`platformIndependent` setting. diff --git a/Manual/BuildTools/Lake/Config.lean b/Manual/BuildTools/Lake/Config.lean index 85c7e2c5..cb558f4e 100644 --- a/Manual/BuildTools/Lake/Config.lean +++ b/Manual/BuildTools/Lake/Config.lean @@ -7,8 +7,8 @@ Author: David Thrane Christiansen import VersoManual import Lean.Parser.Command -import Lake.DSL.Syntax import Lake.Config.Monad +import Lake.DSL import Manual.Meta import Manual.BuildTools.Lake.CLI @@ -179,7 +179,8 @@ name = "example-package" license := "", licenseFiles := #[FilePath.mk "LICENSE"], readmeFile := FilePath.mk "README.md", - reservoir := true}, + reservoir := true, + enableArtifactCache? 
:= none}, configFile := FilePath.mk "lakefile", relConfigFile := FilePath.mk "lakefile", relManifestFile := FilePath.mk "lake-manifest.json", @@ -194,7 +195,8 @@ name = "example-package" postUpdateHooks := #[], buildArchive := ELIDED, testDriver := "", - lintDriver := ""} + lintDriver := "", + cacheRef? := none} ``` :::: ::::: @@ -258,7 +260,8 @@ name = "Sorting" license := "", licenseFiles := #[FilePath.mk "LICENSE"], readmeFile := FilePath.mk "README.md", - reservoir := true}, + reservoir := true, + enableArtifactCache? := none}, configFile := FilePath.mk "lakefile", relConfigFile := FilePath.mk "lakefile", relManifestFile := FilePath.mk "lake-manifest.json", @@ -341,7 +344,8 @@ name = "Sorting" postUpdateHooks := #[], buildArchive := ELIDED, testDriver := "", - lintDriver := ""} + lintDriver := "", + cacheRef? := none} ``` :::: ::::: @@ -796,13 +800,14 @@ from git $t $[@ $t]? $[/ $t]? ## Targets -{tech}[Targets] are typically added to the set of default targets by applying the `default_target` attribute, rather than by explicitly listing them. + +{tech}[Targets] are typically added to the set of default targets by applying the `default_target` attribute, rather than by explicitly listing them. :::TODO -It's presently impossible to import Lake.DSL.AttributesCore due to initialization changes, so `default_target` can't be rendered/checked as an attribute above. This should be fixed upstream. +Fix `default_target` above - it's not working on CI, but it is working locally, with the `attr` role. ::: -:::syntax attr (title := "Specifying Default Targets") (label := "attribute") +:::syntax attr (title := "Specifying Default Targets") (label := "attribute") (namespace := Lake.DSL) ```grammar default_target @@ -1019,7 +1024,7 @@ Whitespace is not permitted between the name and `.*` or `.+`. 
-{docstring Lake.LeanOption (allowMissing := true)} +{docstring Lake.LeanOption} {docstring Lake.Backend} diff --git a/Manual/Classes.lean b/Manual/Classes.lean index 28fc3c04..094ecf87 100644 --- a/Manual/Classes.lean +++ b/Manual/Classes.lean @@ -26,7 +26,6 @@ set_option pp.rawOnError true set_option linter.unusedVariables false - set_option maxRecDepth 100000 #doc (Manual) "Type Classes" => %%% @@ -152,14 +151,15 @@ def f [n : Nat] : n = n := rfl ```leanOutput notClass invalid binder annotation, type is not a class instance Nat -use the command `set_option checkBinderAnnotations false` to disable the check + +Note: Use the command `set_option checkBinderAnnotations false` to disable the check ``` ::: ::::example "Class vs Structure Constructors" A very small algebraic hierarchy can be represented either as structures ({name}`S.Magma`, {name}`S.Semigroup`, and {name}`S.Monoid` below), a mix of structures and classes ({name}`C1.Monoid`), or only using classes ({name}`C2.Magma`, {name}`C2.Semigroup`, and {name}`C2.Monoid`): -````lean +```lean namespace S structure Magma (α : Type u) where op : α → α → α @@ -192,7 +192,7 @@ class Monoid (α : Type u) extends Semigroup α where ident_left : ∀ x, op ident x = x ident_right : ∀ x, op x ident = x end C2 -```` +``` {name}`S.Monoid.mk` and {name}`C1.Monoid.mk` have identical signatures, because the parent of the class {name}`C1.Monoid` is not itself a class: @@ -258,7 +258,7 @@ Two instances of the same class with the same parameters are not necessarily ide ::::example "Instances are Not Unique" This implementation of binary heap insertion is buggy: -````lean +```lean structure Heap (α : Type u) where contents : Array α deriving Repr @@ -269,13 +269,13 @@ def Heap.bubbleUp [Ord α] (i : Nat) (xs : Heap α) : Heap α := else let j := i / 2 if Ord.compare xs.contents[i] xs.contents[j] == .lt then - Heap.bubbleUp j {xs with contents := xs.contents.swap i j} + Heap.bubbleUp j { xs with contents := xs.contents.swap i j } else xs 
def Heap.insert [Ord α] (x : α) (xs : Heap α) : Heap α := let i := xs.contents.size {xs with contents := xs.contents.push x}.bubbleUp i -```` +``` The problem is that a heap constructed with one {name}`Ord` instance may later be used with another, leading to the breaking of the heap invariant. @@ -284,7 +284,9 @@ One way to correct this is to making the heap type depend on the selected `Ord` structure Heap' (α : Type u) [Ord α] where contents : Array α -def Heap'.bubbleUp [inst : Ord α] (i : Nat) (xs : @Heap' α inst) : @Heap' α inst := +def Heap'.bubbleUp [inst : Ord α] + (i : Nat) (xs : @Heap' α inst) : + @Heap' α inst := if h : i = 0 then xs else if h : i ≥ xs.contents.size then xs else @@ -368,7 +370,7 @@ However, {name}`plusTimes2` fails, because there is no {lean}`AddMul' Nat` insta failed to synthesize AddMul' ?m.22 -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Declaring an very general instance takes care of the problem for {lean}`Nat` and every other type: ```lean (name := plusTimes2b) diff --git a/Manual/Classes/BasicClasses.lean b/Manual/Classes/BasicClasses.lean index 38119ad6..735a5d16 100644 --- a/Manual/Classes/BasicClasses.lean +++ b/Manual/Classes/BasicClasses.lean @@ -246,7 +246,7 @@ example (f g : Nat → Nat) : Decidable (f = g) := inferInstance failed to synthesize Decidable (f = g) -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Opening `Classical` makes every proposition decidable; however, declarations and examples that use this fact must be marked {keywordOf Lean.Parser.Command.declaration}`noncomputable` to indicate that code should not be generated for them. 
diff --git a/Manual/Classes/DerivingHandlers.lean b/Manual/Classes/DerivingHandlers.lean index d2cce01e..f11536d4 100644 --- a/Manual/Classes/DerivingHandlers.lean +++ b/Manual/Classes/DerivingHandlers.lean @@ -21,11 +21,27 @@ open Lean Elab Command set_option maxRecDepth 1024 set_option maxHeartbeats 650_000 -def derivableClasses : IO (Array Name) := do +/-- Classes that are part of the manual, not to be shown -/ +private def hiddenDerivable : Array Name := #[``Manual.Toml.Test] + +private def derivableClasses : IO (Array Name) := do let handlers ← derivingHandlersRef.get - let derivable := handlers.toList.map (·.fst) |>.toArray |>.qsort (·.toString < ·.toString) + let derivable := + handlers.toList.map (·.fst) + |>.toArray + |>.filter (fun x => !hiddenDerivable.contains x && !(`Lean).isPrefixOf x) + |>.qsort (·.toString < ·.toString) pure derivable + +-- When new deriving handlers are added, check that they should actually appear in the manual and +-- then update either `hiddenDerivable` or this `#guard_msgs`: +/-- +info: #[`BEq, `DecidableEq, `Hashable, `Inhabited, `Nonempty, `Ord, `Repr, `SizeOf, `TypeName] +-/ +#guard_msgs in +#eval derivableClasses + open Verso Doc Elab ArgParse in open SubVerso Highlighting in @[directive_expander derivableClassList] @@ -35,7 +51,7 @@ def derivableClassList : DirectiveExpander ArgParse.done.run args if contents.size > 0 then throwError "Expected empty directive" let classNames ← derivableClasses - let itemStx ← classNames.filter (!(`Lean).isPrefixOf ·) |>.mapM fun n => do + let itemStx ← classNames.mapM fun n => do let hl : Highlighted ← constTok n n.toString `(Inline.other {Verso.Genre.Manual.InlineLean.Inline.name with data := ToJson.toJson $(quote hl)} #[Inline.code $(quote n.toString)]) let theList ← `(Verso.Doc.Block.ul #[$[⟨#[Verso.Doc.Block.para #[$itemStx]]⟩],*]) @@ -54,6 +70,7 @@ They are provided with all of the names in the mutual block for which the instan When a handler returns {lean}`true`, no further handlers 
are called. Lean includes deriving handlers for the following classes: + :::derivableClassList ::: diff --git a/Manual/Classes/InstanceDecls.lean b/Manual/Classes/InstanceDecls.lean index 78886132..20f31659 100644 --- a/Manual/Classes/InstanceDecls.lean +++ b/Manual/Classes/InstanceDecls.lean @@ -142,7 +142,7 @@ with errors in both the left and right recursive calls that read: failed to synthesize BEq NatTree -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Given a suitable recursive function, such as {lean}`NatTree.beq`: ```lean @@ -187,7 +187,7 @@ def NatRoseTree.beq : (tree1 tree2 : NatRoseTree) → Bool failed to synthesize BEq (Array NatRoseTree) -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` To solve this, a local {lean}`BEq NatRoseTree` instance may be `let`-bound: @@ -265,7 +265,7 @@ instance : DecidableEq StringList failed to synthesize Decidable (t1 = t2) -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` However, because it is an ordinary Lean function, it can recursively refer to its own explicitly-provided name: ```lean diff --git a/Manual/Classes/InstanceSynth.lean b/Manual/Classes/InstanceSynth.lean index 6461ff84..c0ddcdd2 100644 --- a/Manual/Classes/InstanceSynth.lean +++ b/Manual/Classes/InstanceSynth.lean @@ -278,7 +278,7 @@ Because instance synthesis selects the most recently defined instance, the follo failed to synthesize OneSmaller (Option Bool) Bool -Additional diagnostic information may be available using the `set_option diagnostics true` command. 
+Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` The {lean}`OneSmaller (Option Bool) (Option Unit)` instance was selected during instance synthesis, without regard to the supplied value of `β`. ::: diff --git a/Manual/Coercions.lean b/Manual/Coercions.lean index 122fcb72..bcf06355 100644 --- a/Manual/Coercions.lean +++ b/Manual/Coercions.lean @@ -101,7 +101,7 @@ end #check Int.bdiv /-- -error: invalid field 'bdiv', the environment does not contain 'Nat.bdiv' +error: Invalid field `bdiv`: The environment does not contain `Nat.bdiv` n has type Nat @@ -122,7 +122,7 @@ The coercion from {lean}`Nat` to {lean}`Int` is not considered when looking up t example (n : Nat) := n.bdiv 2 ``` ```leanOutput natBdiv -invalid field 'bdiv', the environment does not contain 'Nat.bdiv' +Invalid field `bdiv`: The environment does not contain `Nat.bdiv` n has type Nat @@ -221,7 +221,7 @@ numerals are polymorphic in Lean, but the numeral `9` cannot be used in a contex Bin due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` This is because coercions are inserted in response to mismatched types, but a failure to synthesize an {name}`OfNat` instance is not a type mismatch. @@ -354,7 +354,7 @@ def tomorrow : Later String := section variable {α : Type u} ``` -:::example "Duplicate Evaluation in Coercions" +::::example "Duplicate Evaluation in Coercions" Because the contents of {lean}`Coe` instances are unfolded during coercion insertion, coercions that use their argument more than once should be careful to ensure that evaluation occurs just once. 
This can be done by using a helper function that is not part of the instance, or by using {keywordOf Lean.Parser.Term.let}`let` to evaluate the coerced term and then re-use its resulting value. @@ -378,16 +378,14 @@ def twice (x : α) : Twice α where instance : Coe α (Twice α) := ⟨twice⟩ ``` When the {name}`Coe` instance is unfolded, the call to {name}`twice` remains, which causes its argument to be evaluated before the body of the function is executed. -As a result, the {keywordOf Lean.Parser.Term.dbgTrace}`dbg_trace` executes just once: +As a result, the {keywordOf Lean.Parser.Term.dbgTrace}`dbg_trace` is included in the resulting term just once: ```lean (name := eval1) #eval ((dbg_trace "hello"; 5 : Nat) : Twice Nat) ``` +This used to demonstrate the effect: ```leanOutput eval1 hello ``` -```leanOutput eval1 -{ first := 5, second := 5, first_eq_second := _ } -``` Inlining the helper into the {name}`Coe` instance results in a term that duplicates the {keywordOf Lean.Parser.Term.dbgTrace}`dbg_trace`: ```lean (name := eval2) @@ -400,11 +398,8 @@ instance : Coe α (Twice α) where hello hello ``` -```leanOutput eval2 -{ first := 5, second := 5, first_eq_second := _ } -``` -Introducing an intermediate name for the result of the evaluation prevents the duplicated work: +Introducing an intermediate name for the result of the evaluation prevents the duplication of {keywordOf Lean.Parser.Term.dbgTrace}`dbg_trace`: ```lean (name := eval3) instance : Coe α (Twice α) where coe x := let y := x; ⟨y, y, rfl⟩ @@ -414,11 +409,8 @@ instance : Coe α (Twice α) where ```leanOutput eval3 hello ``` -```leanOutput eval3 -{ first := 5, second := 5, first_eq_second := _ } -``` -::: +:::: ```lean (show := false) end ``` @@ -485,12 +477,12 @@ Non-dependent coercions are used whenever all values of the inferred type can be :::example "Defining Dependent Coercions" The string "four" can be coerced into the natural number {lean type:="Nat"}`4` with this instance declaration: -````lean (name := 
fourCoe) +```lean (name := fourCoe) instance : CoeDep String "four" Nat where coe := 4 #eval ("four" : Nat) -```` +``` ```leanOutput fourCoe 4 ``` @@ -949,7 +941,7 @@ example : StringMonoid := "hello" :::example "Sort Coercions as Ordinary Coercions" The {tech}[inductive type] {name}`NatOrBool` represents the types {name}`Nat` and {name}`Bool`. -The can be coerced to the actual types {name}`Nat` and {name}`Bool`: +They can be coerced to the actual types {name}`Nat` and {name}`Bool`: ```lean inductive NatOrBool where | nat | bool diff --git a/Manual/Defs.lean b/Manual/Defs.lean index 803c8091..c8678118 100644 --- a/Manual/Defs.lean +++ b/Manual/Defs.lean @@ -345,9 +345,9 @@ def map {α β} (f : α → β) : :::::example "Iterated Automatic Implicit Parameters" :::leanSection -````lean (show := false) +```lean (show := false) variable (i : Fin n) -```` +``` Given a number bounded by {lean}`n`, represented by the type `Fin n`, an {lean}`AtLeast i` is a natural number paired with a proof that it is at least as large as as `i`. ::: ```lean @@ -368,9 +368,9 @@ def AtLeast.add (x y : AtLeast i) : AtLeast i := ::::paragraph :::leanSection -````lean (show := false) +```lean (show := false) variable (i : Fin n) -```` +``` The signature of {lean}`AtLeast.add` requires multiple rounds of automatic implicit parameter insertion. First, {lean}`i` is inserted; but its type depends on the upper bound {lean}`n` of {lean}`Fin n`. In the second round, {lean}`n` is inserted, using a machine-chosen name. @@ -418,8 +418,9 @@ def select (choices : α × α × α) : Asnwer → α ``` The resulting error message states that the argument's type is not a constant, so dot notation cannot be used in the pattern: ```leanOutput asnwer -invalid dotted identifier notation, expected type is not of the form (... → C ...) where C is a constant +Invalid dotted identifier notation: The expected type of `.yes` Asnwer +is not of the form `C ...` or `... 
→ C ...` where C is a constant ``` This is because the signature is: ```signature @@ -468,9 +469,9 @@ def select (choices : α × α × α) : Answer → α | .maybe => choices.2.1 | .no => choices.2.2 ``` -````leanOutput noauto +```leanOutput noauto unknown identifier 'α' -```` +``` ::: :::: diff --git a/Manual/Elaboration.lean b/Manual/Elaboration.lean index f9aace03..cbd360c8 100644 --- a/Manual/Elaboration.lean +++ b/Manual/Elaboration.lean @@ -139,10 +139,10 @@ When interacting with Lean code, much more information is needed than when simpl For example, Lean's interactive environment can be used to view the types of selected expressions, to step through all the intermediate states of a proof, to view documentation, and highlight all occurrences of a bound variable. The information necessary to use Lean interactively is stored in a side table called the {deftech}_info trees_ during elaboration. -````lean (show := false) +```lean (show := false) open Lean.Elab (Info) deriving instance TypeName for Unit -```` +``` Info trees relate metadata to the user's original syntax. @@ -185,7 +185,7 @@ example (b : B) : ⟨b.1, b.2⟩ = b := rfl error: type mismatch rfl has type - ?m.848 = ?m.848 : Prop + ?m.858 = ?m.858 : Prop but is expected to have type e1 = e2 : Prop -/ diff --git a/Manual/ErrorExplanations.lean b/Manual/ErrorExplanations.lean new file mode 100644 index 00000000..1f61c16e --- /dev/null +++ b/Manual/ErrorExplanations.lean @@ -0,0 +1,94 @@ +/- +Copyright (c) 2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Author: Joseph Rotella +-/ + +import Manual.Meta.ErrorExplanation + +open Lean +open Verso Doc Elab Genre Manual + +namespace Manual + +set_option pp.rawOnError true +set_option guard_msgs.diff true + +set_option manual.requireErrorExplanations true + +inline_extension Inline.errorExplanationLink (errorName : Name) where + data := toJson errorName + traverse := fun _ _ _ => pure none + toTeX := none + toHtml := some fun go _ data content => + open Verso.Output.Html Verso.Doc.Html.HtmlT in do + let xref ← state + let .ok name := FromJson.fromJson? (α := String) data + | logError s!"Failed to parse error explanation link JSON: expected string, but found:\n{data}" + content.mapM go + let some obj := (← read).traverseState.getDomainObject? errorExplanationDomain name + | logError s!"Could not find explanation domain entry for name '{name}'" + content.mapM go + let some id := obj.getId + | logError s!"Could not retrieve ID from explanation domain entry for name '{name}'" + content.mapM go + if let some { path, htmlId } := xref.externalTags.get? id then + let addr := path.link (some htmlId.toString) + pure {{{{← content.mapM go}}}} + else + logError s!"Could not find external tag for error explanation '{name}' corresponding to ID '{id}'" + content.mapM go + +/- Renders the suffix of an error explanation, allowing line breaks before capital letters. -/ +inline_extension Inline.errorExplanationShortName (errorName : Name) where + data := toJson (getBreakableSuffix errorName) + traverse := fun _ _ _ => pure none + extraCss := [".error-explanation-short-name { hyphenate-character: ''; }"] + toTeX := none + toHtml := some fun _go _id info _content => + open Verso.Output Html in do + let .ok (some errorName) := fromJson? 
(α := Option String) info + | HtmlT.logError "Invalid data for explanation name element" + pure .empty + let html := {{ {{errorName}} }} + return html + +@[block_role_expander error_explanation_table] +def error_explanation_table : BlockRoleExpander + | #[], #[] => do + let entries ← getErrorExplanationsSorted + let columns := 4 + let header := true + let name := "error-explanation-table" + let alignment : Option TableConfig.Alignment := none + let headers ← #["Name", "Summary", "Severity", "Since"] + |>.mapM fun s => ``(Verso.Doc.Block.para #[Doc.Inline.text $(quote s)]) + let vals ← entries.flatMapM fun (name, explan) => do + let sev := quote <| if explan.metadata.severity == .warning then "Warning" else "Error" + let sev ← ``(Doc.Inline.text $sev) + let nameLink ← ``(Doc.Inline.other (Inline.errorExplanationLink $(quote name)) + #[Doc.Inline.other (Inline.errorExplanationShortName $(quote name)) #[]]) + let summary ← ``(Doc.Inline.text $(quote explan.metadata.summary)) + let since ← ``(Doc.Inline.text $(quote explan.metadata.sinceVersion)) + #[nameLink, summary, sev, since] + |>.mapM fun s => ``(Verso.Doc.Block.para #[$s]) + let blocks := (headers ++ vals).map fun c => Syntax.TSepArray.mk #[c] + pure #[← ``(Block.other (Block.table $(quote columns) $(quote header) $(quote name) $(quote alignment)) #[Block.ul #[$[Verso.Doc.ListItem.mk #[$blocks,*]],*]])] + | _, _ => throwError "unexpected syntax" + +-- Elaborating explanations can exceed the default heartbeat maximum: +set_option maxHeartbeats 1000000 + +#doc (Manual) "Error Explanations" => +%%% +number := false +htmlToc := false +%%% + +This section provides explanations of errors and warnings that may be generated +by Lean when processing a source file. All error names listed below have the +`lean` package prefix. 
+ +{error_explanation_table} + +{make_explanations} diff --git a/Manual/Grind.lean b/Manual/Grind.lean new file mode 100644 index 00000000..5f12dbaa --- /dev/null +++ b/Manual/Grind.lean @@ -0,0 +1,2295 @@ +/- +Copyright (c) 2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author: Leo de Moura, Kim Morrison +-/ + +import VersoManual + +import Lean.Parser.Term + +import Manual.Meta + +-- Needed for the if-then-else normalization example. +import Std.Data.TreeMap +import Std.Data.HashMap + +open Verso.Genre Manual +open Verso.Genre.Manual.InlineLean +open Verso.Doc.Elab (CodeBlockExpander) + +open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode + +set_option pp.rawOnError true + +-- TODO (@kim-em): `Lean.Grind.AddCommMonoid` and `Lean.Grind.AddCommGroup` are not yet documented. +set_option verso.docstring.allowMissing true + +set_option linter.unusedVariables false + +-- The verso default max line length is 60, which is very restrictive. +-- TODO: discuss with David. +set_option verso.code.warnLineLength 72 + +set_option maxHeartbeats 400000 -- Needed for elaboration of the `IndexMap` example. + +open Manual (comment) + +#doc (Manual) "The `grind` tactic" => +%%% +tag := "grind" +%%% + +```lean (show := false) +-- Open some namespaces for the examples. +open Lean Lean.Grind Lean.Meta.Grind +``` + +# Quick Start + +* *Availability* – {tactic}`grind` ships with Lean 4 (no extra installation) and is usable in any Lean file—just write `by grind`. No extra `import` is required beyond what your own definitions already need. + +* *Library support* – Lean’s standard library is already annotated with `@[grind]` attributes, so common lemmas are discovered automatically. Mathlib will be annotated gradually, starting with its most frequently used theories. + +* *First proof* + + ```lean + example (a b c : Nat) (h₁ : a = b) (h₂ : b = c) : + a = c := by + grind + ``` + + This succeeds instantly using congruence closure. 
+ +* *Power examples* – showcasing {tactic}`grind`'s satellite solvers: + + * *Algebraic reasoning* (commutative‑ring solver): + + ```lean + example [CommRing α] [NoNatZeroDivisors α] (a b c : α) + : a + b + c = 3 → + a^2 + b^2 + c^2 = 5 → + a^3 + b^3 + c^3 = 7 → + a^4 + b^4 = 9 - c^4 := by + grind + ``` + + * *Finite‑field style reasoning* (works in {lean}`Fin 11`): + + ```lean + example (x y : Fin 11) : + x^2*y = 1 → x*y^2 = y → y*x = 1 := by + grind + ``` + + * *Linear integer arithmetic with case analysis*: + + ```lean + example (x y : Int) : + 27 ≤ 11*x + 13*y → + 11*x + 13*y ≤ 45 → + -10 ≤ 7*x - 9*y → + 7*x - 9*y ≤ 4 → False := by + grind + ``` + +* *Useful flags* + + * `by grind (splits := 3) (ematch := 2)` – limit case splits / E‑matching rounds. + +# What is {tactic}`grind`? + +A proof‑automation tactic inspired by modern SMT solvers. + +*Picture a virtual white‑board:* every time {tactic}`grind` discovers a new equality, inequality, or Boolean literal it writes that fact on the board, merges equivalent terms into buckets, and invites each engine to read from—and add back to—the shared white-board. The cooperating engines are: +* congruence closure, +* constraint propagation, +* E‑matching, +* guided case analysis, and +* a suite of satellite theory solvers (linear integer arithmetic, commutative rings, …). + +Lean supports dependent types and a powerful type‑class system, and {tactic}`grind` produces ordinary Lean proof terms for every fact it adds. + +# What {tactic}`grind` is *not*. + +{tactic}`grind` is *not* designed for goals whose search space explodes combinatorially—think large‑`n` pigeonhole instances, graph‑coloring reductions, high‑order N‑queens boards, or a 200‑variable Sudoku encoded as Boolean constraints. Such encodings require thousands (or millions) of case‑splits that overwhelm {tactic}`grind`’s branching search. + +* *Bit‑level or pure Boolean combinatorial problems* → use {tactic}`bv_decide`. 
+ The {tactic}`bv_decide` tactic calls a state‑of‑the‑art SAT solver (e.g. CaDiCaL or Kissat) and then returns a *compact, machine‑checkable certificate*. All heavy search happens outside Lean; the certificate is replayed and verified inside Lean, so trust is preserved (verification time scales with certificate size). +* *Full SMT problems that need substantial case analysis across multiple theories* (arrays, bit‑vectors, rich arithmetic, quantifiers, …) → use the forthcoming *`lean‑smt`* tactic—a tight Lean front‑end for CVC5 that replays unsat cores or models inside Lean. + +# Congruence Closure + +## What is congruence closure? + +Congruence closure maintains *equivalence classes of terms* under the reflexive–symmetric–transitive closure of "is equal to" _and_ the rule that equal arguments yield equal function results. Formally, if `a = a'` and `b = b'`, then `f a b = f a' b'` is added. The algorithm merges classes until a fixed point is reached. + +Think of a *shared white‑board*: + +1. Every hypothesis `h : t₁ = t₂` writes a line connecting `t₁` and `t₂`. +2. Each merge paints both terms the same color. Soon whole constellations (`f a`, `g (f a)`, …) share the color. +3. If {lean}`True` and {lean}`False` ever land in the same color—or likewise two different constructors of the _same inductive type_ such as {lean}`none` and {lean}`some 1`—the goal is closed by contradiction. + +## How it differs from {tactic}`simp` + +* {tactic}`simp` _rewrites_ a goal, replacing occurrences of `t₁` with `t₂` as soon as it sees `h : t₁ = t₂`. The rewrite is directional and destructive. +* {tactic}`grind` _accumulates_ equalities bidirectionally. No term is rewritten; instead, both representatives live in the same class. All other engines (E‑matching, theory solvers, propagation) can query these classes and add new facts, then the closure updates incrementally. 
+ +This makes congruence closure especially robust in the presence of symmetrical reasoning, mutual recursion, and large nestings of constructors where rewriting would duplicate work. + +## Minimal examples + +```lean +example {α} (f g : α → α) (x y : α) + (h₁ : x = y) (h₂ : f y = g y) : + f x = g x := by + -- After `h₁`, `x` and `y` share a class, + -- `h₂` adds `f y = g y`, and + -- closure bridges to `f x = g x` + grind + +example (a b c : Nat) (h : a = b) : (a, c) = (b, c) := by + -- Pair constructor obeys congruence, + -- so once `a = b` the tuples are equal + grind +``` + +# Debugging tip + +When {tactic}`grind` *fails* it prints the remaining subgoal *followed by all equivalence classes*. The two largest classes are shown as *True propositions* and *False propositions*, listing every literal currently known to be provable or refutable. Inspect these lists to spot missing facts or contradictory assumptions. + +# Constraint Propagation + +Constraint propagation works on the *True* and *False* buckets of the white‑board. Whenever a literal is added to one of those buckets, {tactic}`grind` fires dozens of small _forward rules_ to push its logical consequences: + +* Boolean connectives — e.g. if `A` is {lean}`True`, mark `A ∨ B` as {lean}`True`; if `A ∧ B` is {lean}`True`, mark both `A` and `B` as {lean}`True`; if `A ∧ B` is {lean}`False`, at least one of `A`, `B` becomes {lean}`False`. +* Inductive datatypes — two different constructors (`none` vs `some _`) collapsing into the same class yields a contradiction; equal tuples yield equal components. +* Projections and casts — from `h : (x, y) = (x', y')` we derive `x = x'` and `y = y'`; any term `cast h a` is merged with `a` immediately (using a heterogeneous equality) so both live in the same class. +* Structural eta and definitional equalities — `⟨a, b⟩.1` propagates to `a`, etc. + +Below is a _representative slice_ of the propagators so you can see the style they follow. 
Each follows the same skeleton: inspect the truth‑value of sub‑expressions, push equalities ({lean}`pushEq`) or truth‑values ({lean}`pushEqTrue` / {lean}`pushEqFalse`), and optionally close the goal if a contradiction ({lean}`closeGoal`) arises. A few high‑signal examples: + +```lean (show := false) +namespace ExamplePropagators +``` +```lean (keep := false) + +/-- Propagate equalities *upwards* for conjunctions. -/ +builtin_grind_propagator propagateAndUp ↑And := fun e => do + let_expr And a b := e | return () + if (← isEqTrue a) then + -- a = True ⇒ (a ∧ b) = b + pushEq e b <| + mkApp3 (mkConst ``Grind.and_eq_of_eq_true_left) + a b (← mkEqTrueProof a) + else if (← isEqTrue b) then + pushEq e a <| + mkApp3 (mkConst ``Grind.and_eq_of_eq_true_right) + a b (← mkEqTrueProof b) + else if (← isEqFalse a) then + pushEqFalse e <| + mkApp3 (mkConst ``Grind.and_eq_of_eq_false_left) + a b (← mkEqFalseProof a) + else if (← isEqFalse b) then + pushEqFalse e <| + mkApp3 (mkConst ``Grind.and_eq_of_eq_false_right) + a b (← mkEqFalseProof b) + +/-- +Truth flows *down* when the whole `And` is proven `True`. 
+-/ +builtin_grind_propagator propagateAndDown ↓And := + fun e => do + if (← isEqTrue e) then + let_expr And a b := e | return () + let h ← mkEqTrueProof e + pushEqTrue a <| mkApp3 + (mkConst ``Grind.eq_true_of_and_eq_true_left) a b h + pushEqTrue b <| mkApp3 + (mkConst ``Grind.eq_true_of_and_eq_true_right) a b h +``` +```lean (show := false) +end ExamplePropagators +``` + +Other frequently‑triggered propagators follow the same pattern: + +:::table (header := true) +* + * Propagator + * Handles + * Notes +* + * {lean}`propagateOrUp` / {lean}`propagateOrDown` + * `a ∨ b` + * True/False pushes for disjunctions +* + * {lean}`propagateNotUp` / {lean}`propagateNotDown` + * `¬ a` + * Links `¬ a` with the Boolean of `a` +* + * {lean}`propagateEqUp` / {lean}`propagateEqDown` + * `a = b` + * Bridges Booleans, detects constructor clash +* + * {lean}`propagateIte` / {lean}`propagateDIte` + * `ite` / `dite` + * Replaces chosen branch once condition is fixed +* + * `propagateEtaStruct` + * structures tagged `[grind ext]` + * Generates η‑expansion `a = ⟨a.1, …⟩` +::: + +:::comment +TODO (@kim-em): we don't add the `{lean}` literal type to `propagateEtaStruct` above because it is private. +::: + +Many specialized variants for {lean}`Bool` mirror these rules exactly (e.g. {lean}`propagateBoolAndUp`). + +## Propagation‑only examples + +These goals are closed *purely* by constraint propagation—no case splits, no theory solvers: + +```lean +-- Boolean connective: a && !a is always false. +example (a : Bool) : (a && !a) = false := by + grind + +-- Conditional (ite): +-- once the condition is true, ite picks the 'then' branch. +example (c : Bool) (t e : Nat) (h : c = true) : + (if c then t else e) = t := by + grind + +-- Negation propagates truth downwards. 
+example (a : Bool) (h : (!a) = true) : a = false := by + grind +``` + +These snippets run instantly because the relevant propagators ({lean}`propagateBoolAndUp`, {lean}`propagateIte`, {lean}`propagateBoolNotDown`) fire as soon as the hypotheses are internalized. + +> *Note* If you toggle `set_option trace.grind.eqc true`, {tactic}`grind` will print a line every time two equivalence classes merge—handy for seeing propagation in action. + +*Implementation tip* {tactic}`grind` is still under active development. Until the API has stabilized we recommend _refraining from custom elaborators or satellite solvers_. If you really need a project‑local propagator, use the user‑facing `grind_propagator` command rather than `builtin_grind_propagator` (the latter is reserved for Lean’s own code). When adding new propagators keep them *small and orthogonal*—they should fire in ≤1 µs and either push one fact or close the goal. This keeps the propagation phase predictable and easy to debug. + +We continuously expand and refine the rule set—expect the *Info View* to show increasingly rich {lean}`True`/{lean}`False` buckets over time. The full equivalence classes are displayed automatically _only when {tactic}`grind` fails_, and only for the first subgoal it could not close—use this output to inspect missing facts and understand why the subgoal remains open. + +# Case Analysis + +## Selection heuristics + +{tactic}`grind` decides which sub‑term to split on by combining three sources of signal: + +1. *Structural flags* — quick Booleans that enable whole syntactic classes: + + * `splitIte` (default {lean}`true`) → split every `if … then … else …` term. + * `splitMatch` (default {lean}`true`) → split on all `match` expressions (the {tactic}`grind` analogue of Lean’s {tactic}`split` tactic, just like `splitIte`). + * `splitImp` (default {lean}`false`) → when {lean}`true` splits on any hypothesis `A → B` whose antecedent `A` is *propositional*. 
Arithmetic antecedents are special‑cased: if `A` is an arithmetic literal (`≤`, `=`, `¬`, `Dvd`, …) {tactic}`grind` will split _even when `splitImp := false`_ so the integer solver can propagate facts. + +👉 Shorthand toggles: `by grind -splitIte +splitImp` expands to `by grind (splitIte := false) (splitImp := true)`. +2. *Global limit* — `splits := n` caps the *depth* of the search tree. Once a branch performs `n` splits {tactic}`grind` stops splitting further in that branch; if the branch cannot be closed it reports that the split threshold has been reached. +3. *Manual annotations* — you may mark *any* inductive predicate or structure with + + :::comment + Note this *not* a lean code block, because `Even` and `Sorted` do not exist. + TODO: replace this with a checkable example. + ::: + ``` + attribute [grind cases] Even Sorted + ``` + + and {tactic}`grind` will treat every instance of that predicate as a candidate for splitting. + +## Examples + +```lean +-- splitIte demonstration +example (c : Bool) (x y : Nat) + (h : (if c then x else y) = 0) : + x = 0 ∨ y = 0 := by + grind + +example (c : Bool) (x y : Nat) + (h : (if c then x else y) = 0) : + x = 0 ∨ y = 0 := by + -- The following fails because we need one case split + fail_if_success grind (splits := 0) + grind (splits := 1) + +-- User‑defined predicate with [grind cases] +inductive Even : Nat → Prop + | zero : Even 0 + | step : Even n → Even (n+2) + +attribute [grind cases] Even + +example (h : Even 5) : False := by + -- With the attribute, + -- grind immediately splits on the Even hypothesis + grind + +example (h : Even (n + 2)) : Even n := by + grind + +example (h : y = match x with | 0 => 1 | _ => 2) : + y > 0 := by + -- `grind` fails if we disable `splitMatch` + fail_if_success grind -splitMatch + grind +``` + +## Tips + +* Increase `splits` *only* when the goal genuinely needs deeper branching; each extra level multiplies the search space. 
+* Disable `splitMatch` when large pattern‑matching definitions explode the tree. +* You can combine flags: `by grind -splitMatch (splits := 10) +splitImp`. +* The `[grind cases]` attribute is *scoped*; you can use the modifiers `local`/`scoped` if you only want extra splits inside a section or namespace. + +# E‑matching + +E-matching is a mechanism used by `grind` to instantiate theorems efficiently. +It is especially effective when combined with congruence closure, enabling +`grind` to discover non-obvious consequences of equalities and annotated theorems +automatically. + +Consider the following functions and theorems: +```lean +def f (a : Nat) : Nat := + a + 1 + +def g (a : Nat) : Nat := + a - 1 + +@[grind =] +theorem gf (x : Nat) : g (f x) = x := by + simp [f, g] +``` +The theorem `gf` asserts that `g (f x) = x` for all natural numbers `x`. +The attribute `[grind =]` instructs `grind` to use the left-hand side of the equation, +`g (f x)`, as a pattern for heuristic instantiation via E-matching. +Suppose we now have a goal involving: +```lean +example {a b} (h : f b = a) : g a = b := by + grind +``` +Although `g a` is not an instance of the pattern `g (f x)`, +it becomes one modulo the equation `f b = a`. +By substituting `a` with `f b` in `g a`, we obtain the term `g (f b)`, +which matches the pattern `g (f x)` with the assignment `x := b`. +Thus, the theorem `gf` is instantiated with `x := b`, +and the new equality `g (f b) = b` is asserted. +`grind` then uses congruence closure to derive the implied equality +`g a = g (f b)` and completes the proof. + +The pattern used to instantiate theorems affects the effectiveness of `grind`. +For example, the pattern `g (f x)` is too restrictive in the following case: +the theorem `gf` will not be instantiated because the goal does not even +contain the function symbol `g`. 
+ +```lean (error := true) +example (h₁ : f b = a) (h₂ : f c = a) : b = c := by + grind +``` + +You can use the command `grind_pattern` to manually select a pattern for a given theorem. +In the following example, we instruct `grind` to use `f x` as the pattern, +allowing it to solve the goal automatically: +```lean +grind_pattern gf => f x + +example {a b c} (h₁ : f b = a) (h₂ : f c = a) : b = c := by + grind +``` +You can enable the option `trace.grind.ematch.instance` to make `grind` print a +trace message for each theorem instance it generates. +```lean +/-- +trace: [grind.ematch.instance] gf: g (f c) = c +[grind.ematch.instance] gf: g (f b) = b +-/ +#guard_msgs (trace) in +example (h₁ : f b = a) (h₂ : f c = a) : b = c := by + set_option trace.grind.ematch.instance true in + grind +``` + +You can also specify a *multi-pattern* to control when `grind` should instantiate a theorem. +A multi-pattern requires that all specified patterns are matched in the current context +before the theorem is instantiated. This is useful for lemmas such as transitivity rules, +where multiple premises must be simultaneously present for the rule to apply. +The following example demonstrates this feature using a transitivity axiom for a binary relation `R`: +```lean (keep := false) +opaque R : Int → Int → Prop +axiom Rtrans {x y z : Int} : R x y → R y z → R x z + +grind_pattern Rtrans => R x y, R y z + +example {a b c d} : R a b → R b c → R c d → R a d := by + grind +``` +By specifying the multi-pattern `R x y, R y z`, we instruct `grind` to +instantiate `Rtrans` only when both `R x y` and `R y z` are available in the context. +In the example, `grind` applies `Rtrans` to derive `R a c` from `R a b` and `R b c`, +and can then repeat the same reasoning to deduce `R a d` from `R a c` and `R c d`. + +Instead of using `grind_pattern` to explicitly specify a pattern, +you can use the `@[grind]` attribute or one of its variants, which will use a heuristic to generate a (multi-)pattern. 
+The `@[grind?]` attribute displays an info message showing the pattern which was selected—this is very helpful for debugging! + +* `@[grind →]` will select a multi-pattern from the hypotheses of the theorem (i.e. it will use the theorem for forwards reasoning). + In more detail, it will traverse the hypotheses of the theorem from left-to-right, and each time it encounters a minimal indexable (i.e. has a constant as its head) subexpression which "covers" (i.e. fixes the value of) an argument which was not previously covered, it will add that subexpression as a pattern, until all arguments have been covered. This rule is described in more detail below. +* `@[grind ←]` will select a multi-pattern from the conclusion of theorem (i.e. it will use the theorem for backwards reasoning). + This may fail if not all the arguments to the theorem appear in the conclusion. +* `@[grind]` will traverse the conclusion and then the hypotheses left-to-right, adding patterns as they increase the coverage, stopping when all arguments are covered. +* `@[grind =]` checks that the conclusion of the theorem is an equality, and then uses the left-hand-side of the equality as a pattern. + This may fail if not all of the arguments appear in the left-hand-side. +* `@[grind =_]` is like `@[grind =]`, but using the right-hand-side of the equality. +* `@[grind _=_]` acts like a macro which expands to `@[grind =, grind =_]` (i.e. it will add *two* multipatterns, allowing the equality theorem to trigger in either direction). + +Although it is tempting to just use `@[grind]` by default, we recommend that when one of the other forms achieves the desired effect, you use those. +In every case, it is worthwhile to verify the chosen pattern using `@[grind?]` (which accepts all of these modifiers). + +There are also three less commonly used modifiers: + +* `@[grind =>]` traverses all the hypotheses left-to-right and then the conclusion. 
+
+* `@[grind <=]` traverses the conclusion and then all hypotheses right-to-left.
+* `@[grind ←=]` is unlike the others, and it is used specifically for backwards reasoning on equality. As an example, suppose we have a theorem
+  ```lean (keep := false)
+  theorem inv_eq [One α] [Mul α] [Inv α] {a b : α} (w : a * b = 1) : a⁻¹ = b := sorry
+  ```
+  Adding `@[grind ←=]` will cause this theorem to be instantiated whenever we are trying to prove `a⁻¹ = b`, i.e. whenever we have the disequality `a⁻¹ ≠ b` (recall `grind` proves goals by contradiction).
+  Without special support via `←=` this instantiation would not be possible as `grind` does not consider the `=` symbol while generating patterns.
+
+
+The rule for selecting patterns from subexpressions of the hypotheses and conclusion as described above is subtle, so we'll give some examples.
+
+```lean
+axiom p : Nat → Nat
+axiom q : Nat → Nat
+
+/-- info: h₁: [q #1] -/
+#guard_msgs (info) in
+@[grind? →] theorem h₁ (w : 7 = p (q x)) : p (x + 1) = q x := sorry
+```
+
+First, to understand the output we need to recall that the `#n` appearing in patterns are arguments of the theorem, numbered as de-Bruijn variables, i.e. in reverse order (so `#0` would be `w : p (q x) = 7`, while `#1` is the implicit argument `x`).
+
+Why was `q #1` selected when we use `@[grind →]`? The attribute `@[grind →]` instructed grind to find patterns by traversing the hypotheses from left-to-right.
+In this case, there's only the one hypothesis `p (q x) = 7`. The heuristic described above says that `grind` will search for a minimal indexable subexpression which covers a previously uncovered argument.
+There's just one uncovered argument, `x`, so we're looking for a minimal expression containing that.
+We can't take the whole `p (q x) = 7` because `grind` will not index on equality. The right-hand-side `7` is not helpful, because it doesn't determine the value of `x`.
+
+We don't take `p (q x)` because it is not minimal: it has `q x` inside of it, which is indexable (its head is the constant `q`),
+and it determines the value of `x`. The expression `q x` itself is minimal, because `x` is not indexable. Thus {tactic}`grind` selects `q x` as the pattern.
+
+Let's see some more examples:
+```lean
+/-- info: h₂: [p (#1 + 1)] -/
+#guard_msgs (info) in
+@[grind? ←] theorem h₂ (w : 7 = p (q x)) : p (x + 1) = q x := sorry
+
+/--
+info: h₃: [p (#1 + 1)]
+---
+info: h₃: [q #1]
+-/
+#guard_msgs (info) in
+@[grind? _=_] theorem h₃ (w : 7 = p (q x)) : p (x + 1) = q x := sorry
+
+/-- info: h₄: [p (#2 + 2), q #1] -/
+#guard_msgs (info) in
+@[grind?] theorem h₄ (w : p x = q y) : p (x + 2) = 7 := sorry
+
+/--
+error: `@[grind ←] theorem h₅` failed to
+find patterns in the theorem's conclusion,
+consider using different options or the `grind_pattern` command
+-/
+#guard_msgs (error) in
+@[grind? ←] theorem h₅ (w : p x = q y) : p (x + 2) = 7 := sorry
+
+/-- info: h₆: [q (#3 + 2), p (#2 + 2)] -/
+#guard_msgs (info) in
+@[grind? =>] theorem h₆ (_ : q (y + 2) = q y) (_ : q (y + 1) = q y) : p (x + 2) = 7 := sorry
+```
+
+If you're planning to do substantial annotation work, you should study these examples and verify that
+they follow the rules described above.
+
+E-matching can generate too many theorem instances. Some patterns may even generate an unbounded
+number of instances. For example, consider the pattern `s x` in the following example.
+
+```lean (error := true)
+def s (x : Nat) := 0
+
+@[grind =] theorem s_eq (x : Nat) : s x = s (x + 1) :=
+  rfl
+
+example : s 0 > 0 := by
+  grind
+```
+
+In the example above, `grind` instantiates `s_eq` with `x := 0` which generates the term
+`s 1` which is then used to instantiate `s_eq` with `x := 1` which generates the term `s 2`
+and so on. The instantiation process is interrupted using the `generation` threshold.
+Terms occurring in the input goal have `generation` zero. 
When `grind` instantiates
+a theorem using terms with generation `≤ n`, the new generated terms have generation `n+1`.
+You can set the maximum generation using the option `grind (gen := )`.
+You can also control the number of E-matching rounds using the option `grind (ematch := )`.
+In the following example, we prove that `(iota 20).length > 10` by instantiating `iota_succ`
+and `List.length_cons`.
+
+```lean
+def iota : Nat → List Nat
+  | 0 => []
+  | n+1 => n :: iota n
+
+@[grind =] theorem iota_succ : iota (n+1) = n :: iota n :=
+  rfl
+
+example : (iota 20).length > 10 := by
+  grind (gen := 20) (ematch := 20)
+```
+
+You can set the option `set_option diagnostics true` to obtain the number of
+theorem instances generated by `grind` per theorem. This is useful to detect
+theorems that contain patterns that are triggering too many instances.
+
+:::comment
+FIXME: the relevant grind diagnostic hover doesn't show up in the docs, it's obscured by generic diagnostics.
+:::
+```lean
+set_option diagnostics true in
+example : (iota 20).length > 10 := by
+  grind (gen := 20) (ematch := 20)
+```
+
+By default, `grind` uses the automatically generated equations for `match`-expressions as E-matching theorems.
+
+```lean
+example (x y : Nat)
+    : x = y + 1 →
+      0 < match x with
+          | 0 => 0
+          | _+1 => 1 := by
+  grind
+```
+
+You can disable this feature by using `grind -matchEqs`.
+
+```lean (error := true)
+example (x y : Nat)
+    : x = y + 1 →
+      0 < match x with
+          | 0 => 0
+          | _+1 => 1 := by
+  grind -matchEqs
+```
+
+:::comment
+TBD
+* anti‑patterns
+* local vs global attributes
+* `gen` modifier?
+:::
+
+# Linear Integer Arithmetic Solver
+
+The linear integer arithmetic solver, `cutsat`, implements a model-based decision procedure for linear integer arithmetic,
+inspired by Section 4 of "Cutting to the Chase: Solving Linear Integer Arithmetic".
+
+The implementation in `grind` includes several enhancements and modifications such as
+
+- Extended constraint support (equality and disequality).
+- Optimized encoding of the `Cooper-Left` rule using a "big"-disjunction instead of fresh variables.
+- Decision variable tracking for case splits (disequalities, `Cooper-Left`, `Cooper-Right`).
+
+The solver can process four categories of linear polynomial constraints (where `p` is a linear polynomial):
+1. Equality: `p = 0`
+2. Divisibility: `d ∣ p`
+3. Inequality: `p ≤ 0`
+4. Disequality: `p ≠ 0`
+
+The procedure builds a model incrementally, resolving conflicts through constraint generation.
+For example, given a partial model `{x := 1}` and constraint `3 ∣ 3*y + x + 1`:
+- The solver cannot extend the model to `y` because `3 ∣ 3*y + 2` is unsatisfiable.
+- Thus, it resolves the conflict by generating the implied constraint `3 ∣ x + 1`.
+- The new constraint forces the solver to find a new assignment for `x`.
+
+When assigning a variable `y`, the solver considers:
+- The best upper and lower bounds (inequalities).
+- A divisibility constraint.
+- All disequality constraints where `y` is the maximal variable.
+
+The `Cooper-Left` and `Cooper-Right` rules handle the combination of inequalities and divisibility.
+For unsatisfiable disequalities `p ≠ 0`, the solver generates the case split: `p + 1 ≤ 0 ∨ -p + 1 ≤ 0`.
+
+The following examples demonstrate goals that can be decided by `cutsat`.
+
+```lean
+-- The left-hand-side is a multiple of 2.
+example {x y : Int} : 2 * x + 4 * y ≠ 5 := by
+  grind
+
+-- Mixing equalities and inequalities.
+example {x y : Int} : 2 * x + 3 * y = 0 → 1 ≤ x → y < 1 := by
+  grind
+
+-- Linear divisibility constraints.
+example (a b : Int) : 2 ∣ a + 1 → 2 ∣ b + a → ¬ 2 ∣ b + 2*a := by
+  grind
+```
+
+You can disable this solver using the option `grind -cutsat`.
+ +```lean (error := true) +example (a b : Int) : 2 ∣ a + 1 → 2 ∣ b + a → ¬ 2 ∣ b + 2*a := by + grind -cutsat +``` + +The solver is complete for linear integer arithmetic. +The following example has a rational solution, but does not have integer ones. + +```lean +-- The following example has rational solutions, but no integer one. +example {x y : Int} + : 27 ≤ 13*x + 11*y → 13*x + 11*y ≤ 30 → + -10 ≤ 9*x - 7*y → 9*x - 7*y > 4 := by + grind +``` + +The search can become vast with very few constraints, but `cutsat` was +not designed to perform massive case-analysis. You can reduce the search +space by instructing `cutsat` to accept rational solutions using the option +`grind +qlia`. + +```lean (error := true) +example {x y : Int} + : 27 ≤ 13*x + 11*y → 13*x + 11*y ≤ 30 → + -10 ≤ 9*x - 7*y → 9*x - 7*y > 4 := by + grind +qlia +``` + +In the example above, you can inspect the rational model constructed by `cutsat` +by expanding the section "Assignment satisfying linear constraints" in the goal +diagnostics. + +The solver currently does not have support for nonlinear constraints, and treats +nonlinear terms such as `x*x` as variables. Thus, it fails to solve the following goal. +You can use the option `trace.grind.cutsat.assert` to trace all constraints processed +by `cutsat`. Note that the term `x*x` is "quoted" in `「x * x」 + 1 ≤ 0` to indicate +that `x*x` is treated as a variable. + +```lean (error := true) +example (x : Int) : x*x ≥ 0 := by + set_option trace.grind.cutsat.assert true in + grind +``` + +The solver also implements model-based theory combination. This is a mechanism for +propagating equalities back to the core module that might trigger new congruences. + +```lean +example (f : Int → Int) (x y : Int) + : f x = 0 → 0 ≤ y → y ≤ 1 → y ≠ 1 → + f (x + y) = 0 := by + grind +``` + +In the example above, the linear inequalities and disequalities imply `y = 0`, +and consequently `x = x + y`, and `f x = f (x + y)` by congruence. 
+Model-based theory combination increases the size of the search space, and you +can disable it using the option `grind -mbtc` + +```lean (error := true) +example (f : Int → Int) (x y : Int) + : f x = 0 → 0 ≤ y → y ≤ 1 → y ≠ 1 → + f (x + y) = 0 := by + grind -mbtc +``` + +The `cutsat` solver can also process linear constraints containing natural numbers. +It converts them into integer constraints by using `Int.ofNat`. + +```lean +example (x y z : Nat) : x < y + z → y + 1 < z → z + x < 3*z := by + grind +``` + +The solver also supports linear division and modulo operations. + +```lean +example (x y : Int) : x = y / 2 → y % 2 = 0 → y - 2*x = 0 := by + grind +``` + +The `cutsat` solver normalizes commutative (semi)ring expressions, so can solve goals like +```lean +example (a b : Nat) (h₁ : a + 1 ≠ a * b * a) (h₂ : a * a * b ≤ a + 1) : b * a^2 < a + 1 := by + grind +``` + +There is an extensible mechanism via the {lean}`Lean.Grind.ToInt` typeclass to tell cutsat that a type embeds in the integers. +Using this, we can solve goals such as: + +```lean +example (a b c : Fin 11) : a ≤ 2 → b ≤ 3 → c = a + b → c ≤ 5 := by + grind + +example (a : Fin 2) : a ≠ 0 → a ≠ 1 → False := by + grind + +example (a b c : UInt64) : a ≤ 2 → b ≤ 3 → c - a - b = 0 → c ≤ 5 := by + grind +``` + +Planned future features: improved constraint propagation. + +# Algebraic Solver (Commutative Rings, Fields) + +The `ring` solver is inspired by Gröbner basis computation procedures and term rewriting completion. +It views multivariate polynomials as rewriting rules. For example, the polynomial equality `x*y + x - 2 = 0` +is treated as a rewriting rule `x*y ↦ -x + 2`. It uses superposition to ensure the rewriting system is +confluent. Users can enable the `ring` solver for their own types by providing instances of +the following type classes, all in the `Lean.Grind` namespace. +The algebraic solvers will self-configure depending on the availability of these typeclasses, so not all need to be provided. 
+
+The capabilities of the algebraic solvers will of course degrade when some are not available.
+
+{docstring Lean.Grind.Semiring}
+
+{docstring Lean.Grind.Ring}
+
+{docstring Lean.Grind.CommSemiring}
+
+{docstring Lean.Grind.CommRing}
+
+{docstring Lean.Grind.IsCharP}
+
+{docstring Lean.Grind.AddRightCancel}
+
+{docstring Lean.Grind.NoNatZeroDivisors}
+
+{docstring Lean.Grind.Field}
+
+The Lean standard library contains the applicable instances for the types defined in core.
+Mathlib is also pre-configured. For example, the Mathlib `CommRing` type class implements
+the `Lean.Grind.CommRing α` to ensure the `ring` solver works out-of-the-box.
+
+The following examples demonstrate goals that can be decided by the `ring` solver.
+
+```lean
+open Lean Grind
+
+example [CommRing α] (x : α) : (x + 1)*(x - 1) = x^2 - 1 := by
+  grind
+
+-- The solver "knows" that `16*16 = 0` because the
+-- ring characteristic is `256`.
+example [CommRing α] [IsCharP α 256] (x : α)
+    : (x + 16)*(x - 16) = x^2 := by
+  grind
+
+-- Types in the std library implement the appropriate type classes.
+-- `UInt8` is a commutative ring with characteristic `256`.
+example (x : UInt8) : (x + 16)*(x - 16) = x^2 := by
+  grind
+
+example [CommRing α] (a b c : α)
+    : a + b + c = 3 →
+      a^2 + b^2 + c^2 = 5 →
+      a^3 + b^3 + c^3 = 7 →
+      a^4 + b^4 = 9 - c^4 := by
+  grind
+
+example [CommRing α] (x y : α)
+    : x^2*y = 1 → x*y^2 = y → y*x = 1 := by
+  grind
+
+-- `ring` proves that `a + 1 = 2 + a` is unsatisfiable because
+-- the characteristic is known.
+example [CommRing α] [IsCharP α 0] (a : α)
+    : a + 1 = 2 + a → False := by
+  grind
+```
+
+Even when the characteristic is not initially known, when `grind` discovers that `n = 0` for some numeral `n`, it makes inferences about the characteristic:
+```lean
+example [CommRing α] (a b c : α)
+    (h₁ : a + 6 = a) (h₂ : c = c + 9) (h : b + 3*c = 0) :
+    27*a + b = 0 := by
+  grind
+```
+
+The class `NoNatZeroDivisors` is used to control coefficient growth.
+
+For example, the polynomial `2*x*y + 4*z = 0` is simplified to `x*y + 2*z = 0`.
+It is also used when processing disequalities. In the following example,
+if you remove the local instance `[NoNatZeroDivisors α]`, the goal will not be solved.
+
+```lean
+example [CommRing α] [NoNatZeroDivisors α] (a b : α)
+    : 2*a + 2*b = 0 → b ≠ -a → False := by
+  grind
+```
+
+The `ring` solver also has support for `[Field α]`. During preprocessing,
+it rewrites the term `a/b` as `a*b⁻¹`. It also rewrites every disequality
+`p ≠ 0` as the equality `p * p⁻¹ = 1`. This transformation is essential to
+prove the following example:
+
+```lean
+example [Field α] (a : α)
+    : a^2 = 0 → a = 0 := by
+  grind
+```
+
+The `ring` module also performs case-analysis for terms `a⁻¹` on whether `a` is zero or not.
+In the following example, if `2*a` is zero, then `a` is also zero since
+we have `NoNatZeroDivisors α`, and all terms are zero and the equality holds. Otherwise,
+`ring` adds the equalities `a*a⁻¹ = 1` and `2*a*(2*a)⁻¹ = 1`, and closes the goal.
+
+```lean
+example [Field α] [NoNatZeroDivisors α] (a : α)
+    : 1 / a + 1 / (2 * a) = 3 / (2 * a) := by
+  grind
+```
+
+Without `NoNatZeroDivisors`, `grind` will perform case splits on numerals being zero as needed:
+```lean
+example [Field α] (a : α) : (2 * a)⁻¹ = a⁻¹ / 2 := by grind
+```
+
+In the following example, `ring` does not need to perform any case split because
+the goal contains the disequalities `y ≠ 0` and `w ≠ 0`.
+
+```lean
+example [Field α] {x y z w : α}
+    : x / y = z / w → y ≠ 0 → w ≠ 0 → x * w = z * y := by
+  grind (splits := 0)
+```
+
+You can disable the `ring` solver using the option `grind -ring`.
+
+```lean (error := true)
+example [CommRing α] (x y : α)
+    : x^2*y = 1 → x*y^2 = y → y*x = 1 := by
+  grind -ring
+```
+
+The `ring` solver automatically embeds `CommSemiring`s into a `CommRing` envelope (using the construction `Lean.Grind.Ring.OfSemiring.Q`).
+However, the embedding is injective only when the `CommSemiring` implements the type class `AddRightCancel`. +The type `Nat` is an example of such a commutative semiring implementing `AddRightCancel`. + +```lean +example (x y : Nat) + : x^2*y = 1 → x*y^2 = y → y*x = 1 := by + grind +``` + +Gröbner basis computation can be very expensive. You can limit the number of steps performed by +the `ring` solver using the option `grind (ringSteps := )` + +```lean (error := true) +example {α} [CommRing α] [IsCharP α 0] (d t c : α) (d_inv PSO3_inv : α) + : d^2 * (d + t - d * t - 2) * (d + t + d * t) = 0 → + -d^4 * (d + t - d * t - 2) * + (2 * d + 2 * d * t - 4 * d * t^2 + 2 * d * t^4 + + 2 * d^2 * t^4 - c * (d + t + d * t)) = 0 → + d * d_inv = 1 → + (d + t - d * t - 2) * PSO3_inv = 1 → + t^2 = t + 1 := by + -- This example cannot be solved by performing at most 100 steps + grind (ringSteps := 100) +``` + +The `ring` solver propagates equalities back to the `grind` core by normalizing terms using the +computed Gröbner basis. In the following example, the equations `x^2*y = 1` and `x*y^2 = y` imply the equalities +`x = 1` and `y = 1`. Thus, the terms `x*y` and `1` are equal, and consequently `some (x*y) = some 1` +by congruence. + +```lean +example (x y : Int) + : x^2*y = 1 → x*y^2 = y → some (y*x) = some 1 := by + grind +``` + +Planned future features: support for noncommutative rings and semirings. + +# Linear Arithmetic Solver + +`grind` also contains a linear arithmetic `linarith` solver parametrized by type classes. +It self-configures depending on the availability of these type classes, so not all need to be provided. +The capabilities of the `linarith` solver will of course degrade when some are not available. +The solver ignores any type supported by `cutsat`. This module is useful for reasoning about `Real`, +ordered vector spaces, etc. 
+ +The main type classes for module structures are `NatModule` (every `Semiring` is a `NatModule`) and `IntModule` (every `Ring` is an `IntModule`). +These may interact with the three order classes `Preorder`, `PartialOrder`, and `LinearOrder`. +(Typically a `Preorder` is enough when the context already includes a contradiction, but to prove linear inequality goals you will need a `LinearOrder`.) +To express that the additive structure in a module is compatible with the order we need `OrderedAdd`. We have limited support for ordered rings at present, represented by the typeclass `OrderedRing`. + +{docstring Lean.Grind.NatModule} + +{docstring Lean.Grind.IntModule} + +{docstring Lean.Grind.Preorder} + +{docstring Lean.Grind.PartialOrder} + +{docstring Lean.Grind.LinearOrder} + +{docstring Lean.Grind.OrderedAdd} + +{docstring Lean.Grind.OrderedRing} + +The core functionality of `linarith` is a model based solver for linear inequalities with integer coefficients. +You can disable this solver using the option `grind -linarith`. + +The following examples demonstrate goals that can be decided by the `linarith` solver. + +```lean (show := false) +section +``` +```lean +variable [IntModule α] [LinearOrder α] [OrderedAdd α] + +example (a b : α) : 2*a + b ≥ b + a + a := by grind +example (a b : α) (h : a ≤ b) : 3 * a + b ≤ 4 * b := by grind +example (a b c : α) (_ : a = b + c) (_ : 2 * b ≤ c) : + 2 * a ≤ 3 * c := by grind + +example (a b c d e : α) : + 2*a + b ≥ 0 → b ≥ 0 → c ≥ 0 → d ≥ 0 → e ≥ 0 + → a ≥ 3*c → c ≥ 6*e → d - 5*e ≥ 0 + → a + b + 3*c + d + 2*e < 0 → False := by + grind +``` +```lean (show := false) +end +``` + +```lean (show := false) +section +``` +At present we only use the `CommRing` structure to do basic normalization (e.g. identifying linear atoms `a * b` and `b * a`), +and to allow constants (with the fact `0 < 1`) and scalar multiplication on both sides. 
+
+```lean
+variable [CommRing R] [LinearOrder R] [OrderedRing R]
+
+example (a b : R) (h : a * b ≤ 1) : b * 3 * a + 1 ≤ 4 := by grind
+
+example (a b c d e f : R) :
+    2*a + b ≥ 1 → b ≥ 0 → c ≥ 0 → d ≥ 0 → e*f ≥ 0
+      → a ≥ 3*c → c ≥ 6*e*f → d - f*e*5 ≥ 0
+      → a + b + 3*c + d + 2*e*f < 0 → False := by
+  grind
+```
+```lean (show := false)
+end
+```
+
+Planned future features
+* Support for `NatModule` (by embedding in the Grothendieck envelope, as we already do for semirings),
+* Better communication between the `ring` and `linarith` solvers.
+  There is currently very little communication between these two solvers.
+* Non-linear arithmetic over ordered rings.
+
+:::comment
+# Diagnostics
+TBD
+Threshold notices, learned equivalence classes, integer assignments, algebraic basis, performed splits, instance statistics.
+
+# Troubleshooting & FAQ
+TBD
+:::
+
+# Bigger Examples
+
+## Integrating `grind`'s features.
+
+This example demonstrates how the various submodules of `grind` are seamlessly integrated. In particular we can
+* instantiate theorems from the library, using custom patterns,
+* perform case splitting,
+* do linear integer arithmetic reasoning, including modularity conditions, and
+* do Gröbner basis reasoning
+all without providing explicit instructions to drive the interactions between these modes of reasoning.
+
+For this example we'll begin with a "mocked up" version of the real numbers, and the `sin` and `cos` functions.
+Of course, this example works [without any changes](https://github.com/leanprover-community/mathlib4/blob/master/MathlibTest/grind/trig.lean) using Mathlib's versions of these!
+
+```lean
+axiom R : Type
+
+-- TODO: a `sorry` here was causing a run-time crash. It's unclear why.
+@[instance] axiom instCommRingR : Lean.Grind.CommRing R + +axiom sin : R → R +axiom cos : R → R +axiom trig_identity : ∀ x, (cos x)^2 + (sin x)^2 = 1 +``` + +Our first step is to tell grind to "put the trig identity on the whiteboard" whenever it sees a goal involving `sin` or `cos`: + +```lean +grind_pattern trig_identity => cos x +grind_pattern trig_identity => sin x +``` + +Note here we use *two* different patterns for the same theorem, so the theorem is instantiated even if `grind` sees just one of these functions. +If we preferred to more conservatively instantiate the theorem only when both `sin` and `cos` are present, we could have used a multi-pattern: + +```lean (keep := false) +grind_pattern trig_identity => cos x, sin x +``` + +For this example, either approach will work. + +Because `grind` immediately notices the trig identity, we can prove goals like this: +```lean +example : (cos x + sin x)^2 = 2 * cos x * sin x + 1 := by + grind +``` +Here `grind`: +* Notices `cos x` and `sin x`, so instantiates the trig identity. +* Notices that this is a polynomial in `CommRing R`, so sends it to the Gröbner basis module. + No calculation happens at this point: it's the first polynomial relation in this ring, so the Gröbner basis is updated to `[(cos x)^2 + (sin x)^2 - 1]`. +* Notices that the left and right hand sides of the goal are polynomials in `CommRing R`, so sends them to the Gröbner basis module for normalization. +* Since their normal forms modulo `(cos x)^2 + (sin x)^2 = 1` are equal, their equivalence classes are merged, and the goal is solved. 
+ +We can also do this sort of argument when congruence closure is needed: +```lean +example (f : R → Nat) : + f ((cos x + sin x)^2) = f (2 * cos x * sin x + 1) := by + grind +``` + +As before, `grind` instantiates the trig identity, notices that `(cos x + sin x)^2` and `2 * cos x * sin x + 1` are equal modulo `(cos x)^2 + (sin x)^2 = 1`, +puts those algebraic expressions in the same equivalence class, and then puts the function applications `f((cos x + sin x)^2)` and `f(2 * cos x * sin x + 1)` in the same equivalence class, +and closes the goal. + +Notice that we've used arbitrary function `f : R → Nat` here; let's check that `grind` can use some linear integer arithmetic reasoning after the Gröbner basis steps: +```lean +example (f : R → Nat) : + 4 * f ((cos x + sin x)^2) ≠ 2 + f (2 * cos x * sin x + 1) := by + grind +``` + +Here `grind` first works out that this goal reduces to `4 * x ≠ 2 + x` for some `x : Nat` (i.e. by identifying the two function applications as described above), +and then uses modularity to derive a contradiction. + +Finally, we can also mix in some case splitting: +``` +example (f : R → Nat) : max 3 (4 * f ((cos x + sin x)^2)) ≠ 2 + f (2 * cos x * sin x + 1) := by + grind +``` +As before, `grind` first does the instantiation and Gröbner basis calculations required to identify the two function applications. +However the `cutsat` algorithm by itself can't do anything with `max 3 (4 * x) ≠ 2 + x`. +Next, instantiating {lean}`Nat.max_def` (automatically, because of an annotation in the standard library) which states `max n m = if n ≤ m then m else n`, +we then case split on the inequality. +In the branch `3 ≤ 4 * x`, cutsat again uses modularity to prove `4 * x ≠ 2 + x`. +In the branch `4 * x < 3`, cutsat quickly determines `x = 0`, and then notices `4 * 0 ≠ 2 + 0`. + +This has been, of course, a quite artificial example! 
In practice this sort of automatic integration of different reasoning modes is very powerful: +the central "whiteboard" which tracks instantiated theorems and equivalence classes can hand off relevant terms and equalities to the appropriate modules (here, `cutsat` and Gröbner bases), +which can then return new facts to the whiteboard. + +## if-then-else normalization + +```lean (show := false) +open Std +``` + +:::comment +FIXME (@david-christiansen): I'd like to be able to write ``{attr}`@[grind]` ``. +::: + +This example is a showcase for the "out of the box" power of {tactic}`grind`. +Later examples will explore adding `@[grind]` annotations as part of the development process, to make {tactic}`grind` more effective in a new domain. +This example does not rely on any of the algebra extensions to `grind`, we're just using: +* instantiation of annotated theorems from the library, +* congruence closure, and +* case splitting. + +The solution here builds on an earlier formalization by Chris Hughes, but with some notable improvements: +* the verification is separate from the code, +* the proof is now a one-liner combining {tactic}`fun_induction` and {tactic}`grind`, +* the proof is robust to changes in the code (e.g. swapping out {name}`HashMap` for {name}`TreeMap`) as well as changes to the precise verification conditions. + + +### The problem + +Here is Rustan Leino's original description of the problem, as [posted by Leonardo de Moura](https://leanprover.zulipchat.com/#narrow/stream/113488-general/topic/Rustan's.20challenge) on the Lean Zulip: + +> The data structure is an expression with Boolean literals, variables, and if-then-else expressions. 
+> +> The goal is to normalize such expressions into a form where: +> a) No nested ifs: the condition part of an if-expression is not itself an if-expression +> b) No constant tests: the condition part of an if-expression is not a constant +> c) No redundant ifs: the then and else branches of an if are not the same +> d) Each variable is evaluated at most once: the free variables of the condition are disjoint from those in the then branch, and also disjoint from those in the else branch. +> +> One should show that a normalization function produces an expression satisfying these four conditions, and one should also prove that the normalization function preserves the meaning of the given expression. + +### The formal statement + +:::comment +FIXME: @david-christiansen: can I give `IfExpr` a hover/linkify even though it is a forward reference? Similarly `eval` below? +::: + +To formalize the statement in Lean, we use an inductive type `IfExpr`: + +```lean +/-- An if-expression is either boolean literal, +a numbered variable, or an if-then-else expression +where each subexpression is an if-expression. -/ +inductive IfExpr + | lit : Bool → IfExpr + | var : Nat → IfExpr + | ite : IfExpr → IfExpr → IfExpr → IfExpr +deriving DecidableEq +``` + +and define some inductive predicates and an `eval` function, so we can state the four desired properties: + +```lean +namespace IfExpr + +/-- +An if-expression has a "nested if" if it contains +an if-then-else where the "if" is itself an if-then-else. +-/ +def hasNestedIf : IfExpr → Bool + | lit _ => false + | var _ => false + | ite (ite _ _ _) _ _ => true + | ite _ t e => t.hasNestedIf || e.hasNestedIf + +/-- +An if-expression has a "constant if" if it contains +an if-then-else where the "if" is itself a literal. 
+-/ +def hasConstantIf : IfExpr → Bool + | lit _ => false + | var _ => false + | ite (lit _) _ _ => true + | ite i t e => + i.hasConstantIf || t.hasConstantIf || e.hasConstantIf + +/-- +An if-expression has a "redundant if" if +it contains an if-then-else where +the "then" and "else" clauses are identical. +-/ +def hasRedundantIf : IfExpr → Bool + | lit _ => false + | var _ => false + | ite i t e => t == e || i.hasRedundantIf || + t.hasRedundantIf || e.hasRedundantIf + +/-- +All the variables appearing in an if-expressions, +read left to right, without removing duplicates. +-/ +def vars : IfExpr → List Nat + | lit _ => [] + | var i => [i] + | ite i t e => i.vars ++ t.vars ++ e.vars + +/-- +A helper function to specify that two lists are disjoint. +-/ +def _root_.List.disjoint {α} [DecidableEq α] : + List α → List α → Bool + | [], _ => true + | x::xs, ys => x ∉ ys && xs.disjoint ys + +/-- +An if expression evaluates each variable at most once if +for each if-then-else the variables in the "if" clause +are disjoint from the variables in the "then" clause +and the variables in the "if" clause +are disjoint from the variables in the "else" clause. +-/ +def disjoint : IfExpr → Bool + | lit _ => true + | var _ => true + | ite i t e => + i.vars.disjoint t.vars && i.vars.disjoint e.vars && + i.disjoint && t.disjoint && e.disjoint + +/-- +An if expression is "normalized" if it has +no nested, constant, or redundant ifs, +and it evaluates each variable at most once. +-/ +def normalized (e : IfExpr) : Bool := + !e.hasNestedIf && !e.hasConstantIf && + !e.hasRedundantIf && e.disjoint + +/-- +The evaluation of an if expression +at some assignment of variables. +-/ +def eval (f : Nat → Bool) : IfExpr → Bool + | lit b => b + | var i => f i + | ite i t e => bif i.eval f then t.eval f else e.eval f + +end IfExpr +``` + + +Using these we can state the problem. 
The challenge is to inhabit the following type (and to do so nicely!): + +:::comment +FIXME (@david-christiansen): No long line warning here? +::: +```lean +def IfNormalization : Type := + { Z : IfExpr → IfExpr // ∀ e, (Z e).normalized ∧ (Z e).eval = e.eval } +``` + +### Other solutions + +At this point, it's worth pausing and doing at least one of the following: + +:::comment +TODO (@david-christiansen): We include a link here to live-lean and an externally hosted blob of code. There's no way to keep this in sync. :-( +::: + +* Try to prove this yourself! It's quite challenging for a beginner! + You can [have a go](https://live.lean-lang.org/#project=lean-nightly&url=https%3A%2F%2Fgist.githubusercontent.com%2Fkim-em%2Ff416b31fe29de8a3f1b2b3a84e0f1793%2Fraw%2F75ca61230b50c126f8658bacd933ecf7bfcaa4b8%2Fgrind_ite.lean) + in the Live Lean editor without any installation. +* Read Chris Hughes's [solution](https://github.com/leanprover-community/mathlib4/blob/master/Archive/Examples/IfNormalization/Result.lean), + which is included in the Mathlib Archive. + This solution makes good use of Aesop, but is not ideal because + 1. It defines the solution using a subtype, simultaneously giving the construction and proving properties about it. + We think it's better stylistically to keep these separate. + 2. Even with Aesop automation, there's still about 15 lines of manual proof work before we can hand off to Aesop. +* Read Wojciech Nawrocki's [solution](https://leanprover.zulipchat.com/#narrow/channel/113488-general/topic/Rustan's.20challenge/near/398824748). + This one uses less automation, at about 300 lines of proof work. + +### The solution using {tactic}`grind` + +Actually solving the problem is not that hard: +we just need a recursive function that carries along a record of "already assigned variables", +and then, whenever performing a branch on a variable, adding a new assignment in each of the branches. 
+It also needs to flatten nested if-then-else expressions which have another if-then-else in the "condition" position. +(This is extracted from Chris Hughes's solution, but without the subtyping.) + +:::comment +FIXME: @david-christiansen: the long line linter complains in the next code block, but I can't wrap the options. +::: + +Let's work inside the `IfExpr` namespace. +```lean +namespace IfExpr +``` + +```lean (error := true) (name := failed_to_show_termination) (keep := false) +def normalize (assign : Std.HashMap Nat Bool) : + IfExpr → IfExpr + | lit b => lit b + | var v => + match assign[v]? with + | none => var v + | some b => lit b + | ite (lit true) t _ => normalize assign t + | ite (lit false) _ e => normalize assign e + | ite (ite a b c) t e => + normalize assign (ite a (ite b t e) (ite c t e)) + | ite (var v) t e => + match assign[v]? with + | none => + let t' := normalize (assign.insert v true) t + let e' := normalize (assign.insert v false) e + if t' = e' then t' else ite (var v) t' e' + | some b => normalize assign (ite (lit b) t e) +``` + +This is pretty straightforward, but it immediately runs into a problem: + +:::comment +This output is extremely fragile, because it includes line numbers. +I would like to stop at "Could not find a decreasing measure." +but for this we need support for showing subsets of the output. +::: +```leanOutput failed_to_show_termination +fail to show termination for + IfExpr.normalize +with errors +failed to infer structural recursion: +Cannot use parameter assign: + the type HashMap Nat Bool does not have a `.brecOn` recursor +Cannot use parameter #2: + failed to eliminate recursive application + normalize assign (a.ite (b.ite t e) (c.ite t e)) + + +Could not find a decreasing measure. +The basic measures relate at each recursive call as follows: +(<, ≤, =: relation proved, ? all proofs failed, _: no proof attempted) + #1 x2 +1) 1296:27-45 = < +2) 1297:27-45 = < +3) 1299:4-52 = ? +4) 1303:16-50 ? 
_ +5) 1304:16-51 _ _ +6) 1306:16-50 _ _ + +#1: assign + +Please use `termination_by` to specify a decreasing measure. +``` + + +Lean here is telling us that it can't see that the function is terminating. +Often Lean is pretty good at working this out for itself, but for sufficiently complicated functions +we need to step in to give it a hint. + +In this case we can see that it's the recursive call +`ite (ite a b c) t e` which is calling {lean}`normalize` on `(ite a (ite b t e) (ite c t e))` +where Lean is having difficulty. Lean has made a guess at a plausible termination measure, +based on using automatically generated `sizeOf` function, but can't prove the resulting goal, +essentially because `t` and `e` appear multiple times in the recursive call. + +To address problems like this, we nearly always want to stop using the automatically generated `sizeOf` function, +and construct our own termination measure. We'll use + +```lean +@[simp] def normSize : IfExpr → Nat + | lit _ => 0 + | var _ => 1 + | .ite i t e => 2 * normSize i + max (normSize t) (normSize e) + 1 +``` + + +Many different functions would work here. The basic idea is to increase the "weight" of the "condition" branch +(this is the multiplicative factor in the `2 * normSize i` ), +so that as long the "condition" part shrinks a bit, the whole expression counts as shrinking even if the "then" and "else" branches have grown. +We've annotated the definition with `@[simp]` so Lean's automated termination checker is allowed to unfold the definition. + +With this in place, the definition goes through using the {keywordOf Lean.Parser.Command.declaration}`termination_by` clause: + +:::keepEnv +```lean +def normalize (assign : Std.HashMap Nat Bool) : + IfExpr → IfExpr + | lit b => lit b + | var v => + match assign[v]? 
with + | none => var v + | some b => lit b + | ite (lit true) t _ => normalize assign t + | ite (lit false) _ e => normalize assign e + | ite (ite a b c) t e => + normalize assign (ite a (ite b t e) (ite c t e)) + | ite (var v) t e => + match assign[v]? with + | none => + let t' := normalize (assign.insert v true) t + let e' := normalize (assign.insert v false) e + if t' = e' then t' else ite (var v) t' e' + | some b => normalize assign (ite (lit b) t e) +termination_by e => e.normSize +``` + +Now it's time to prove some properties of this function. +We're just going to package together all the properties we want: + +```lean (keep := false) +theorem normalize_spec + (assign : Std.HashMap Nat Bool) (e : IfExpr) : + (normalize assign e).normalized + ∧ (∀ f, (normalize assign e).eval f = + e.eval fun w => assign[w]?.getD (f w)) + ∧ ∀ (v : Nat), + v ∈ vars (normalize assign e) → ¬ v ∈ assign := + sorry +``` + +That is: +* the result of {lean}`normalize` is actually normalized according to the initial definitions, +* if we normalize an "if-then-else" expression using some assignments, and then evaluate the remaining variables, + we get the same result as evaluating the original "if-then-else" using the composite of the two assignments, +* and any variable appearing in the assignments no longer appears in the normalized expression. + +You might think that we should state these three properties as separate lemmas, +but it turns out that proving them all at once is really convenient, because we can use the {tactic}`fun_induction` +tactic to assume that all these properties hold for {lean}`normalize` in the recursive calls, and then +{tactic}`grind` will just put all the facts together for the result: + +```lean +-- We tell `grind` to unfold our definitions above. 
+attribute [local grind] + normalized hasNestedIf hasConstantIf hasRedundantIf + disjoint vars eval List.disjoint + +theorem normalize_spec + (assign : Std.HashMap Nat Bool) (e : IfExpr) : + (normalize assign e).normalized + ∧ (∀ f, (normalize assign e).eval f = + e.eval fun w => assign[w]?.getD (f w)) + ∧ ∀ (v : Nat), + v ∈ vars (normalize assign e) → ¬ v ∈ assign := by + fun_induction normalize with grind +``` + +The fact that the {tactic}`fun_induction` plus {tactic}`grind` combination just works here is sort of astonishing. +We're really excited about this, and we're hoping to see a lot more proofs in this style! + +A lovely consequence of highly automated proofs is that often you have some flexibility to change the statements, +without changing the proof at all! As examples, the particular way that we asserted above that +"any variable appearing in the assignments no longer appears in the normalized expression" +could be stated in many different ways (although not omitted!). The variations really don't matter, +and {tactic}`grind` can both prove, and use, any of them: + +Here we use `assign.contains v = false`: +```lean +example (assign : Std.HashMap Nat Bool) (e : IfExpr) : + (normalize assign e).normalized + ∧ (∀ f, (normalize assign e).eval f = + e.eval fun w => assign[w]?.getD (f w)) + ∧ ∀ (v : Nat), v ∈ vars (normalize assign e) → + assign.contains v = false := by + fun_induction normalize with grind +``` + +and here we use `assign[v]? = none`: + +```lean +example (assign : Std.HashMap Nat Bool) (e : IfExpr) : + (normalize assign e).normalized + ∧ (∀ f, (normalize assign e).eval f = + e.eval fun w => assign[w]?.getD (f w)) + ∧ ∀ (v : Nat), + v ∈ vars (normalize assign e) → assign[v]? 
= none := by + fun_induction normalize with grind +``` + +In fact, it's also of no consequence to `grind` whether we use a +{name}`HashMap` or a {name}`TreeMap` to store the assignments, +we can simply switch that implementation detail out, without having to touch the proofs: + +::: + + +```lean (show := false) +-- We have to repeat these annotations because we've rolled back the environment to before we defined `normalize`. +attribute [local grind] + normalized hasNestedIf hasConstantIf hasRedundantIf + disjoint vars eval List.disjoint +``` +```lean +def normalize (assign : Std.TreeMap Nat Bool) : + IfExpr → IfExpr + | lit b => lit b + | var v => + match assign[v]? with + | none => var v + | some b => lit b + | ite (lit true) t _ => normalize assign t + | ite (lit false) _ e => normalize assign e + | ite (ite a b c) t e => + normalize assign (ite a (ite b t e) (ite c t e)) + | ite (var v) t e => + match assign[v]? with + | none => + let t' := normalize (assign.insert v true) t + let e' := normalize (assign.insert v false) e + if t' = e' then t' else ite (var v) t' e' + | some b => normalize assign (ite (lit b) t e) +termination_by e => e.normSize + +theorem normalize_spec + (assign : Std.TreeMap Nat Bool) (e : IfExpr) : + (normalize assign e).normalized + ∧ (∀ f, (normalize assign e).eval f = + e.eval fun w => assign[w]?.getD (f w)) + ∧ ∀ (v : Nat), + v ∈ vars (normalize assign e) → ¬ v ∈ assign := by + fun_induction normalize with grind +``` + +(The fact that we can do this relies on the fact that all the lemmas for both `HashMap` and for `TreeMap` that `grind` needs have already be annotated in the standard library.) 
+
+If you'd like to play around with this code,
+you can find the whole file [here](https://github.com/leanprover/lean4/blob/master/tests/lean/run/grind_ite.lean),
+or in fact [play with it with no installation](https://live.lean-lang.org/#project=lean-nightly&url=https%3A%2F%2Fraw.githubusercontent.com%2Fleanprover%2Flean4%2Frefs%2Fheads%2Fmaster%2Ftests%2Flean%2Frun%2Fgrind_ite.lean)
+in the Live Lean editor.
+
+```lean (show := false)
+end IfExpr
+```
+
+## `IndexMap`
+
+In this section we'll build an example of a new data structure and basic API for it, illustrating the use of {tactic}`grind`.
+
+The example will be derived from Rust's [`indexmap`](https://docs.rs/indexmap/latest/indexmap/) data structure.
+
+`IndexMap` is intended as a replacement for `HashMap` (in particular, it has fast hash-based lookup), but allowing the user to maintain control of the order of the elements.
+We won't give a complete API, just set up some basic functions and theorems about them.
+
+The two main functions we'll implement for now are `insert` and `eraseSwap`:
+* `insert k v` checks if `k` is already in the map. If so, it replaces the value with `v`, keeping `k` in the same position in the ordering.
+  If it is not already in the map, `insert` adds `(k, v)` to the end of the map.
+* `eraseSwap k` removes the element with key `k` from the map, and swaps it with the last element of the map (or does nothing if `k` is not in the map).
+  (This semantics is maybe slightly surprising: this function exists because it is an efficient way to erase an element, when you don't care about the order of the remaining elements.
+  Another function, not implemented here, would preserve the order of the remaining elements, but at the cost of running in time proportional to the number of elements after the erased element.)
+
+Our goals will be:
+* complete encapsulation: the implementation of `IndexMap` is hidden from the users, *and* the theorems about the implementation details are private.
+* to use `grind` as much as possible: we'll prefer adding a private theorem and annotating it with `@[grind]` over writing a longer proof whenever practical.
+* to use auto-parameters as much as possible, so that we don't even see the proofs, as they're mostly handled invisibly by `grind`.
+
+To begin with, we'll write out a skeleton of what we want to achieve, liberally using `sorry` as a placeholder for all proofs.
+In particular, this version makes no use of `grind`.
+
+```lean (keep := false)
+
+open Std
+
+structure IndexMap
+    (α : Type u) (β : Type v) [BEq α] [Hashable α] where
+  indices : HashMap α Nat
+  keys : Array α
+  values : Array β
+  size_keys : keys.size = values.size
+  WF : ∀ (i : Nat) (a : α),
+    keys[i]? = some a ↔ indices[a]? = some i
+
+namespace IndexMap
+
+variable {α : Type u} {β : Type v}
+  [BEq α] [LawfulBEq α] [Hashable α] [LawfulHashable α]
+variable {m : IndexMap α β} {a : α} {b : β} {i : Nat}
+
+@[inline] def size (m : IndexMap α β) : Nat :=
+  m.values.size
+
+def emptyWithCapacity (capacity := 8) : IndexMap α β where
+  indices := HashMap.emptyWithCapacity capacity
+  keys := Array.emptyWithCapacity capacity
+  values := Array.emptyWithCapacity capacity
+  size_keys := sorry
+  WF := sorry
+
+instance : EmptyCollection (IndexMap α β) where
+  emptyCollection := emptyWithCapacity
+
+instance : Inhabited (IndexMap α β) where
+  default := ∅
+
+@[inline] def contains (m : IndexMap α β)
+    (a : α) : Bool :=
+  m.indices.contains a
+
+instance : Membership α (IndexMap α β) where
+  mem m a := a ∈ m.indices
+
+instance {m : IndexMap α β} {a : α} : Decidable (a ∈ m) :=
+  inferInstanceAs (Decidable (a ∈ m.indices))
+
+@[inline] def findIdx? (m : IndexMap α β) (a : α) : Option Nat :=
+  m.indices[a]?
+
+@[inline] def findIdx (m : IndexMap α β) (a : α) (h : a ∈ m) : Nat :=
+  m.indices[a]
+
+@[inline] def getIdx? (m : IndexMap α β) (i : Nat) : Option β :=
+  m.values[i]?
+ +@[inline] def getIdx (m : IndexMap α β) (i : Nat) + (h : i < m.size := by get_elem_tactic) : β := + m.values[i] + +instance : + GetElem? (IndexMap α β) α β (fun m a => a ∈ m) where + getElem m a h := + m.values[m.indices[a]]'(by sorry) + getElem? m a := + m.indices[a]?.bind (m.values[·]?) + getElem! m a := + m.indices[a]?.bind (m.values[·]?) |>.getD default + +instance : LawfulGetElem (IndexMap α β) α β (fun m a => a ∈ m) where + getElem?_def := sorry + getElem!_def := sorry + +@[inline] def insert (m : IndexMap α β) (a : α) (b : β) : + IndexMap α β := + match h : m.indices[a]? with + | some i => + { indices := m.indices + keys := m.keys.set i a sorry + values := m.values.set i b sorry + size_keys := sorry + WF := sorry } + | none => + { indices := m.indices.insert a m.size + keys := m.keys.push a + values := m.values.push b + size_keys := sorry + WF := sorry } + +instance : Singleton (α × β) (IndexMap α β) := + ⟨fun ⟨a, b⟩ => (∅ : IndexMap α β).insert a b⟩ + +instance : Insert (α × β) (IndexMap α β) := + ⟨fun ⟨a, b⟩ s => s.insert a b⟩ + +instance : LawfulSingleton (α × β) (IndexMap α β) := + ⟨fun _ => rfl⟩ + +/-- +Erase the key-value pair with the given key, +moving the last pair into its place in the order. +If the key is not present, the map is unchanged. +-/ +@[inline] def eraseSwap (m : IndexMap α β) (a : α) : + IndexMap α β := + match h : m.indices[a]? with + | some i => + if w : i = m.size - 1 then + { indices := m.indices.erase a + keys := m.keys.pop + values := m.values.pop + size_keys := sorry + WF := sorry } + else + let lastKey := m.keys.back sorry + let lastValue := m.values.back sorry + { indices := (m.indices.erase a).insert lastKey i + keys := m.keys.pop.set i lastKey sorry + values := m.values.pop.set i lastValue sorry + size_keys := sorry + WF := sorry } + | none => m + +/-! 
### Verification theorems -/
+
+theorem getIdx_findIdx (m : IndexMap α β) (a : α)
+    (h : a ∈ m) :
+    m.getIdx (m.findIdx a h) sorry = m[a] :=
+  sorry
+
+theorem mem_insert (m : IndexMap α β) (a a' : α) (b : β) :
+    a' ∈ m.insert a b ↔ a' = a ∨ a' ∈ m := by
+  sorry
+
+theorem getElem_insert
+    (m : IndexMap α β) (a a' : α) (b : β)
+    (h : a' ∈ m.insert a b) :
+    (m.insert a b)[a']'h =
+      if h' : a' == a then b else m[a']'sorry := by
+  sorry
+
+theorem findIdx_insert_self
+    (m : IndexMap α β) (a : α) (b : β) :
+    (m.insert a b).findIdx a sorry =
+      if h : a ∈ m then m.findIdx a h else m.size := by
+  sorry
+
+end IndexMap
+```
+
+::::keepEnv
+Let's get started.
+We'll aspire to never writing a proof by hand, and the first step of that is to install auto-parameters for the `size_keys` and `WF` field,
+so we can omit these fields whenever `grind` can prove them.
+While we're modifying the definition of `IndexMap` itself, let's make all the fields private, since we're planning on having complete encapsulation.
+
+```lean
+structure IndexMap
+    (α : Type u) (β : Type v) [BEq α] [Hashable α] where
+  private indices : HashMap α Nat
+  private keys : Array α
+  private values : Array β
+  private size_keys : keys.size = values.size := by grind
+  private WF : ∀ (i : Nat) (a : α),
+    keys[i]? = some a ↔ indices[a]? = some i := by grind
+```
+
+
+```lean (show := false)
+namespace IndexMap
+
+variable {α : Type u} {β : Type v} [BEq α] [Hashable α]
+variable {m : IndexMap α β} {a : α} {b : β} {i : Nat}
+```
+
+Let's give `grind` access to the definition of `size`, and `size_keys` private field:
+
+```lean
+@[inline] def size (m : IndexMap α β) : Nat :=
+  m.values.size
+
+attribute [local grind] size
+attribute [local grind _=_] size_keys
+```
+
+Our first `sorry`s in the draft version are the `size_keys` and `WF` fields in our construction of `def emptyWithCapacity`.
+Surely these are trivial, and solvable by `grind`, so we simply delete those fields: +```lean +def emptyWithCapacity (capacity := 8) : IndexMap α β where + indices := HashMap.emptyWithCapacity capacity + keys := Array.emptyWithCapacity capacity + values := Array.emptyWithCapacity capacity +``` + +```lean (show := false) +instance : EmptyCollection (IndexMap α β) where + emptyCollection := emptyWithCapacity + +instance : Inhabited (IndexMap α β) where + default := ∅ + +@[inline] def contains (m : IndexMap α β) + (a : α) : Bool := + m.indices.contains a + +instance : Membership α (IndexMap α β) where + mem m a := a ∈ m.indices + +instance {m : IndexMap α β} {a : α} : Decidable (a ∈ m) := + inferInstanceAs (Decidable (a ∈ m.indices)) + +@[inline] def findIdx? (m : IndexMap α β) (a : α) : Option Nat := + m.indices[a]? + +@[inline] def findIdx (m : IndexMap α β) (a : α) (h : a ∈ m) : Nat := + m.indices[a] + +@[inline] def getIdx? (m : IndexMap α β) (i : Nat) : Option β := + m.values[i]? + +@[inline] def getIdx (m : IndexMap α β) (i : Nat) + (h : i < m.size := by get_elem_tactic) : β := + m.values[i] +``` + +Our next task is to deal with the `sorry` in our construction of the `GetElem?` instance: +```lean (keep := false) +instance : + GetElem? (IndexMap α β) α β (fun m a => a ∈ m) where + getElem m a h := + m.values[m.indices[a]]'(by sorry) + getElem? m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) + getElem! m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) |>.getD default +``` + +The goal at this sorry is +``` +m : IndexMap α β +a : α +h : a ∈ m +⊢ m.indices[a] < m.values.size +``` + +:::comment +FIXME (Q3): @david-christiansen: +We need to keep the goal display above in sync with the `sorry` in the code block before it. +::: + +Let's try proving this as a stand-alone theorem, via `grind`, and see where `grind` gets stuck. 
+Because we've added `grind` annotations for `size` and `size_keys` already, we can safely reformulate the goal as: + +```lean (name := getElem_indices_lt_1) (error := true) (keep := false) +theorem getElem_indices_lt (m : IndexMap α β) (a : α) (h : a ∈ m) : + m.indices[a] < m.size := by + grind +``` + +This fails, and looking at the message from `grind` we see that it hasn't done much: +:::comment +FIXME (Q3): @david-christiansen: +This needs a mechanism for keeping up to date. +::: +``` +[grind] Goal diagnostics ▼ + [facts] Asserted facts ▼ + [prop] a ∈ m + [prop] m.size ≤ (indices m)[a] + [prop] m.size = (values m).size + [eqc] True propositions ▼ + [prop] m.size ≤ (indices m)[a] + [prop] a ∈ m + [eqc] Equivalence classes ▼ + [] {Membership.mem, fun m a => a ∈ m} + [] {m.size, (values m).size} + [ematch] E-matching patterns ▼ + [thm] size.eq_1: [@size #4 #3 #2 #1 #0] + [thm] HashMap.contains_iff_mem: [@Membership.mem #5 (HashMap _ #4 #3 #2) _ #1 #0] + [cutsat] Assignment satisfying linear constraints ▼ + [assign] m.size := 0 + [assign] (indices m)[a] := 0 + [assign] (values m).size := 0 +``` + +An immediate problem we can see here is that +`grind` does not yet know that `a ∈ m` is the same as `a ∈ m.indices`. +Let's add this fact: + +```lean +@[local grind] private theorem mem_indices_of_mem + {m : IndexMap α β} {a : α} : + a ∈ m ↔ a ∈ m.indices := + Iff.rfl +``` + +However this proof is going to work, we know the following: +* It must use the well-formedness condition of the map. +* It can't do so without relating `m.indices[a]` and `m.indices[a]?` (because the later is what appears in the well-formedness condition). +* The expected relationship there doesn't even hold unless the map `m.indices` satisfies {lean}`LawfulGetElem`, + for which we need `[LawfulBEq α]` and `[LawfulHashable α]`. + +:::comment +TODO: I'd like to ensure there's a link to the `LawfulGetElem` instance for `HashMap`, so we can see these requirements! 
+::: + +Let's configure things so that those are available: + +```lean +variable [LawfulBEq α] [LawfulHashable α] + +attribute [local grind _=_] IndexMap.WF +``` + +and then give `grind` one manual hint, to relate `m.indices[a]` and `m.indices[a]?`: + +```lean (name := getElem_indices_lt_2) +theorem getElem_indices_lt (m : IndexMap α β) (a : α) (h : a ∈ m) : + m.indices[a] < m.size := by + have : m.indices[a]? = some m.indices[a] := by grind + grind +``` + +With that theorem proved, we want to make it accessible to `grind`. +We could either add `@[local grind]` before the theorem statement, +or write `attribute [local grind] getElem_indices_lt` after the theorem statement. +These will use `grind` built-in heuristics for deciding a pattern to match the theorem on. + +In this case, let's use the `grind?` attribute to see the pattern that is being generated: +```lean (name := grind?) (keep := false) +attribute [local grind?] getElem_indices_lt +``` +```leanOutput grind? (whitespace := lax) +getElem_indices_lt: + [@LE.le `[Nat] `[instLENat] + ((@getElem (HashMap #8 `[Nat] #6 #5) _ `[Nat] _ _ + (@indices _ #7 _ _ #2) #1 #0) + 1) + (@size _ _ _ _ #2)] +``` +This is not a useful pattern: it's matching on the entire conclusion of the theorem +(in fact, a normalized version of it, in which `x < y` has been replaced by `x + 1 ≤ y`). + +We want something more general: we'd like this theorem to fire whenever `grind` sees `m.indices[a]`, +and so instead of using the attribute we write a custom pattern: + +```lean +grind_pattern getElem_indices_lt => m.indices[a] +``` + +The Lean standard library uses the `get_elem_tactic` tactic as an auto-parameter for the `xs[i]` notation +(which desugars to `GetElem.getElem xs i h`, with the proof `h` generated by `get_elem_tactic`). +We'd like to not only have `grind` fill in these proofs, but even to be able to omit these proofs. 
+To achieve this, we add the line +```lean (show := false) +-- This block is just here as a guard: when/if the global `get_elem_tactic` uses grind, this will fail, +-- prompting us to update the sentence about "later versions of Lean" below. +example (m : HashMap Nat Nat) : (m.insert 1 2).size ≤ m.size + 1 := by + fail_if_success get_elem_tactic + sorry +``` +```lean +macro_rules | `(tactic| get_elem_tactic_extensible) => `(tactic| grind) +``` +```lean (show := false) +example (m : HashMap Nat Nat) : (m.insert 1 2).size ≤ m.size + 1 := by get_elem_tactic +``` +(In later versions of Lean this may be part of the built-in behavior.) + +We can now return to constructing our `GetElem?` instance, and simply write: +```lean +instance : + GetElem? (IndexMap α β) α β (fun m a => a ∈ m) where + getElem m a h := + m.values[m.indices[a]] + getElem? m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) + getElem! m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) |>.getD default +``` +with neither any `sorry`s, nor any explicitly written proofs. + +Next, we want to expose the content of these definitions, but only locally in this file: +```lean +@[local grind] private theorem getElem_def + (m : IndexMap α β) (a : α) (h : a ∈ m) : + m[a] = m.values[m.indices[a]'h] := + rfl +@[local grind] private theorem getElem?_def + (m : IndexMap α β) (a : α) : + m[a]? = m.indices[a]?.bind (fun i => (m.values[i]?)) := + rfl +@[local grind] private theorem getElem!_def + [Inhabited β] (m : IndexMap α β) (a : α) : + m[a]! = (m.indices[a]?.bind (m.values[·]?)).getD default := + rfl +``` + +Again we're using the `@[local grind] private theorem` pattern to hide these implementation details, +but allow `grind` to see these facts locally. + +Next, we want to prove the `LawfulGetElem` instance, and hope that `grind` can fill in the proofs: +```lean +instance : LawfulGetElem (IndexMap α β) α β (fun m a => a ∈ m) where + getElem?_def := by grind + getElem!_def := by grind +``` + +Success! 
+
+Let's press onward, and see if we can define `insert` without having to write any proofs:
+```lean
+
+@[inline] def insert [LawfulBEq α] (m : IndexMap α β) (a : α) (b : β) :
+    IndexMap α β :=
+  match h : m.indices[a]? with
+  | some i =>
+    { indices := m.indices
+      keys := m.keys.set i a
+      values := m.values.set i b }
+  | none =>
+    { indices := m.indices.insert a m.size
+      keys := m.keys.push a
+      values := m.values.push b }
+```
+In both branches, `grind` is automatically proving both the `size_keys` and `WF` fields!
+Note also in the first branch the `set` calls `m.keys.set i a` and `m.values.set i b`
+are having their "in-bounds" obligations automatically filled in by `grind` via the `get_elem_tactic` auto-parameter.
+
+Next let's try `eraseSwap`:
+```lean (name := eraseSwap_1) (error := true) (keep := false)
+@[inline] def eraseSwap (m : IndexMap α β) (a : α) : IndexMap α β :=
+  match h : m.indices[a]? with
+  | some i =>
+    if w : i = m.size - 1 then
+      { indices := m.indices.erase a
+        keys := m.keys.pop
+        values := m.values.pop }
+    else
+      let lastKey := m.keys.back
+      let lastValue := m.values.back
+      { indices := (m.indices.erase a).insert lastKey i
+        keys := m.keys.pop.set i lastKey
+        values := m.values.pop.set i lastValue }
+  | none => m
+```
+This fails while attempting to prove the `WF` field in the second branch.
+As usual, there is detailed information from `grind` about its failure state, but almost too much to be helpful!
+Let's look at the model produced by `cutsat` and see if we can see what's going on: +``` +[cutsat] Assignment satisfying linear constraints ▼ + [assign] i_1 := 0 + [assign] i_2 := 1 + [assign] (keys m_1).pop.size := 2 + [assign] (keys m_1).size := 3 + [assign] m_1.size := 3 + [assign] ((keys m_1).pop.set i_1 ((keys m_1).back ⋯) ⋯).size := 2 + [assign] (values m_1).size := 3 + [assign] (indices m_1)[a_1] := 0 + [assign] (((indices m_1).erase a_1).insert ((keys m_1).back ⋯) i_1)[a_2] := 0 + [assign] ((keys m_1).set i_1 ((keys m_1).back ⋯) ⋯).pop.size := 2 + [assign] ((keys m_1).set i_1 ((keys m_1).back ⋯) ⋯).size := 3 + [assign] (indices m_1)[a_1] := 0 + [assign] (indices m_1)[a_2] := 1 + [assign] (indices m_1)[(keys m_1)[i_2]] := 1 + [assign] (indices m_1)[(keys m_1)[i_2]] := 1 +``` +:::comment +FIXME (@kim-em / @leodemoura): there is some repeated output here. +::: + +This model consists of an `IndexMap` of size `3`, +with keys `a_1`, `a_2` and the otherwise unnamed `(keys m_1).back ⋯`. + +Everything looks fine, *except* the line: +``` +(((indices m_1).erase a_1).insert ((keys m_1).back ⋯) i_1)[a_2] := 0 +``` +This shouldn't be possible! Since the three keys are distinct, +we should have +``` +(((indices m_1).erase a_1).insert ((keys m_1).back ⋯) i_1)[a_2] = + ((indices m_1).erase a_1)[a_2] = + (indices m_1)[a_2] = + 1 +``` +Now that we've found something suspicious, we can look through the equivalence classes identified by `grind`. +(In the future we'll be providing search tools for inspecting equivalence classes, but for now you need to read through manually.) +We find amongst many others: +``` +{a_2, + (keys m_1).back ⋯, + (keys m_1)[(keys m_1).size - 1], + (keys m_1)[i_2], ...} +``` +This should imply, by the injectivity of `keys`, that `i_2 = (keys m_1).size - 1`. +Since this identity *wasn't* reflected by the cutsat model, +we suspect that `grind` is not managing to use the injectivity of `keys`. 
+ +Thinking about the way that we've provided the well-formedness condition, as +`∀ (i : Nat) (a : α), keys[i]? = some a ↔ indices[a]? = some i`, this perhaps isn't surprising: +it's expressed in terms of `keys[i]?` and `indices[a]?`. +Let's add a variant version of the well-formedness condition using `getElem` instead of `getElem?`: + +```lean +@[local grind] private theorem WF' + (i : Nat) (a : α) (h₁ : i < m.keys.size) (h₂ : a ∈ m) : + m.keys[i] = a ↔ m.indices[a] = i := by + have := m.WF i a + grind +``` +We can verify that with this available, `grind` can now prove: +```lean +example {m : IndexMap α β} {a : α} {h : a ∈ m} : + m.keys[m.indices[a]'h] = a := by grind +``` + +Trying again with `eraseSwap`, everything goes through cleanly now, with no manual proofs: +```lean +@[inline] def eraseSwap (m : IndexMap α β) (a : α) : IndexMap α β := + match h : m.indices[a]? with + | some i => + if w : i = m.size - 1 then + { indices := m.indices.erase a + keys := m.keys.pop + values := m.values.pop } + else + let lastKey := m.keys.back + let lastValue := m.values.back + { indices := (m.indices.erase a).insert lastKey i + keys := m.keys.pop.set i lastKey + values := m.values.pop.set i lastValue } + | none => m +``` + +Finally we turn to the verification theorems about the basic operations, relating `getIdx`, `findIdx`, and `insert`. +By adding a `local grind` annotation allowing `grind` to unfold the definitions of these operations, +the proofs all go through effortlessly: + +``` +/-! 
### Verification theorems -/ + +attribute [local grind] getIdx findIdx insert + +@[grind] theorem getIdx_findIdx (m : IndexMap α β) (a : α) (h : a ∈ m) : + m.getIdx (m.findIdx a) = m[a] := by grind + +@[grind] theorem mem_insert (m : IndexMap α β) (a a' : α) (b : β) : + a' ∈ m.insert a b ↔ a' = a ∨ a' ∈ m := by + grind + +@[grind] theorem getElem_insert + (m : IndexMap α β) (a a' : α) (b : β) (h : a' ∈ m.insert a b) : + (m.insert a b)[a'] = if h' : a' == a then b else m[a'] := by + grind + +@[grind] theorem findIdx_insert_self + (m : IndexMap α β) (a : α) (b : β) : + (m.insert a b).findIdx a = + if h : a ∈ m then m.findIdx a else m.size := by + grind +``` + +Note that these are part of the public API of `IndexMap`, so we need to mark them as `@[grind]`, +so that users without our internal `local grind` annotations can still use them in `grind` proofs. + +:::: + +Putting this all together, our prototype API has reached the following state: + +```lean +macro_rules | `(tactic| get_elem_tactic_extensible) => `(tactic| grind) + +open Std + +structure IndexMap + (α : Type u) (β : Type v) [BEq α] [Hashable α] where + private indices : HashMap α Nat + private keys : Array α + private values : Array β + private size_keys' : keys.size = values.size := by grind + private WF : ∀ (i : Nat) (a : α), + keys[i]? = some a ↔ indices[a]? 
= some i := by grind + +namespace IndexMap + +variable {α : Type u} {β : Type v} [BEq α] [Hashable α] +variable {m : IndexMap α β} {a : α} {b : β} {i : Nat} + +@[inline] def size (m : IndexMap α β) : Nat := + m.values.size + +@[local grind =] private theorem size_keys : m.keys.size = m.size := + m.size_keys' + +def emptyWithCapacity (capacity := 8) : IndexMap α β where + indices := HashMap.emptyWithCapacity capacity + keys := Array.emptyWithCapacity capacity + values := Array.emptyWithCapacity capacity + +instance : EmptyCollection (IndexMap α β) where + emptyCollection := emptyWithCapacity + +instance : Inhabited (IndexMap α β) where + default := ∅ + +@[inline] def contains (m : IndexMap α β) + (a : α) : Bool := + m.indices.contains a + +instance : Membership α (IndexMap α β) where + mem m a := a ∈ m.indices + +instance {m : IndexMap α β} {a : α} : Decidable (a ∈ m) := + inferInstanceAs (Decidable (a ∈ m.indices)) + +@[local grind] private theorem mem_indices_of_mem + {m : IndexMap α β} {a : α} : + a ∈ m ↔ a ∈ m.indices := Iff.rfl + +@[inline] def findIdx? (m : IndexMap α β) (a : α) : Option Nat := + m.indices[a]? + +@[inline] def findIdx (m : IndexMap α β) (a : α) + (h : a ∈ m := by get_elem_tactic) : Nat := + m.indices[a] + +@[inline] def getIdx? (m : IndexMap α β) (i : Nat) : Option β := + m.values[i]? + +@[inline] def getIdx (m : IndexMap α β) (i : Nat) + (h : i < m.size := by get_elem_tactic) : β := + m.values[i] + +variable [LawfulBEq α] [LawfulHashable α] + +attribute [local grind _=_] IndexMap.WF + +private theorem getElem_indices_lt {h : a ∈ m} : m.indices[a] < m.size := by + have : m.indices[a]? = some m.indices[a] := by grind + grind + +grind_pattern getElem_indices_lt => m.indices[a] + +attribute [local grind] size + +instance : GetElem? (IndexMap α β) α β (fun m a => a ∈ m) where + getElem m a h := + m.values[m.indices[a]] + getElem? m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) + getElem! 
m a := + m.indices[a]?.bind (fun i => (m.values[i]?)) |>.getD default + +@[local grind] private theorem getElem_def + (m : IndexMap α β) (a : α) (h : a ∈ m) : + m[a] = m.values[m.indices[a]'h] := + rfl +@[local grind] private theorem getElem?_def + (m : IndexMap α β) (a : α) : + m[a]? = m.indices[a]?.bind (fun i => (m.values[i]?)) := + rfl +@[local grind] private theorem getElem!_def + [Inhabited β] (m : IndexMap α β) (a : α) : + m[a]! = (m.indices[a]?.bind (m.values[·]?)).getD default := + rfl + +instance : LawfulGetElem (IndexMap α β) α β (fun m a => a ∈ m) where + getElem?_def := by grind + getElem!_def := by grind + +@[inline] def insert [LawfulBEq α] (m : IndexMap α β) (a : α) (b : β) : + IndexMap α β := + match h : m.indices[a]? with + | some i => + { indices := m.indices + keys := m.keys.set i a + values := m.values.set i b } + | none => + { indices := m.indices.insert a m.size + keys := m.keys.push a + values := m.values.push b } + +instance [LawfulBEq α] : Singleton (α × β) (IndexMap α β) := + ⟨fun ⟨a, b⟩ => (∅ : IndexMap α β).insert a b⟩ + +instance [LawfulBEq α] : Insert (α × β) (IndexMap α β) := + ⟨fun ⟨a, b⟩ s => s.insert a b⟩ + +instance [LawfulBEq α] : LawfulSingleton (α × β) (IndexMap α β) := + ⟨fun _ => rfl⟩ + +@[local grind] private theorem WF' + (i : Nat) (a : α) (h₁ : i < m.keys.size) (h₂ : a ∈ m) : + m.keys[i] = a ↔ m.indices[a] = i := by + have := m.WF i a + grind + +/-- +Erase the key-value pair with the given key, +moving the last pair into its place in the order. +If the key is not present, the map is unchanged. +-/ +@[inline] def eraseSwap (m : IndexMap α β) (a : α) : IndexMap α β := + match h : m.indices[a]? 
with + | some i => + if w : i = m.size - 1 then + { indices := m.indices.erase a + keys := m.keys.pop + values := m.values.pop } + else + let lastKey := m.keys.back + let lastValue := m.values.back + { indices := (m.indices.erase a).insert lastKey i + keys := m.keys.pop.set i lastKey + values := m.values.pop.set i lastValue } + | none => m + +/-! ### Verification theorems -/ + +attribute [local grind] getIdx findIdx insert + +@[grind] theorem getIdx_findIdx (m : IndexMap α β) (a : α) (h : a ∈ m) : + m.getIdx (m.findIdx a) = m[a] := by grind + +@[grind] theorem mem_insert (m : IndexMap α β) (a a' : α) (b : β) : + a' ∈ m.insert a b ↔ a' = a ∨ a' ∈ m := by + grind + +@[grind] theorem getElem_insert + (m : IndexMap α β) (a a' : α) (b : β) (h : a' ∈ m.insert a b) : + (m.insert a b)[a'] = if h' : a' == a then b else m[a'] := by + grind + +@[grind] theorem findIdx_insert_self + (m : IndexMap α β) (a : α) (b : β) : + (m.insert a b).findIdx a = + if h : a ∈ m then m.findIdx a else m.size := by + grind + +end IndexMap +``` + +We haven't yet proved all the theorems we would want about these operations (or indeed any theorems about `eraseSwap`); the interested reader is encouraged to try proving more, +and perhaps even releasing a complete `IndexMap` library! + +Summarizing the design principles discussed above about encapsulation: +* the fields of `IndexMap` are all private, as these are implementation details. +* the theorems about these fields are all private, and marked as `@[local grind]`, rather than `@[grind]`, as they won't be needed after we've set up the API. +* the verification theorems are both marked as `@[grind]`, and proved by `grind`: + the annotation is necessary because we want grind to be able to prove these facts even once we're outside the current module, and the `@[local grind]` theorems are no longer available. 
diff --git a/Manual/Interaction.lean b/Manual/Interaction.lean index 7a149e0c..7a81a475 100644 --- a/Manual/Interaction.lean +++ b/Manual/Interaction.lean @@ -251,7 +251,7 @@ Attempting to add a string to a natural number fails, as expected: failed to synthesize HAdd String Nat ?m.32 -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Nonetheless, a partially-elaborated term is available: ```leanOutput oneOne @@ -363,7 +363,7 @@ def intersperse (x : α) : List α → List α ``` ```leanOutput intersperse_eqns equations: -theorem intersperse.eq_1.{u_1} : ∀ {α : Type u_1} (x y z : α) (zs : List α), +@[defeq] theorem intersperse.eq_1.{u_1} : ∀ {α : Type u_1} (x y z : α) (zs : List α), intersperse x (y :: z :: zs) = y :: x :: intersperse x (z :: zs) theorem intersperse.eq_2.{u_1} : ∀ {α : Type u_1} (x : α) (x_1 : List α), (∀ (y z : α) (zs : List α), x_1 = y :: z :: zs → False) → intersperse x x_1 = x_1 @@ -467,7 +467,7 @@ $c:command The {keywordOf Lean.guardMsgsCmd}`#guard_msgs` command can ensure that a set of test cases pass: -````lean +```lean def reverse : List α → List α := helper [] where helper acc @@ -481,7 +481,7 @@ where /-- info: ['c', 'b', 'a'] -/ #guard_msgs in #eval reverse "abc".toList -```` +``` ::: @@ -555,9 +555,10 @@ Leading and trailing whitespace is always ignored when comparing messages. On to ::: The option {option}`guard_msgs.diff` controls the content of the error message that {keywordOf Lean.guardMsgsCmd}`#guard_msgs` produces when the expected message doesn't match the produced message. -By default, the error message shows the produced message, which can be compared with the expected message in the source file. -When messages are large and only differ by a small amount, it can be difficult to spot the difference. 
-Setting {option}`guard_msgs.diff` to `true` causes {keywordOf Lean.guardMsgsCmd}`#guard_msgs` to instead show a line-by-line difference, with a leading `+` used to indicate lines from the produced message and a leading `-` used to indicate lines from the expected message. +By default, {keywordOf Lean.guardMsgsCmd}`#guard_msgs` shows a line-by-line difference, with a leading `+` used to indicate lines from the produced message and a leading `-` used to indicate lines from the expected message. +When messages are large and only differ by a small amount, this can make it easier to notice where they differ. +Setting {option}`guard_msgs.diff` to `false` causes {keywordOf Lean.guardMsgsCmd}`#guard_msgs` to instead show just the produced message, which can be compared with the expected message in the source file. +This can be convenient if the difference between the message is confusing or overwhelming. {optionDocs guard_msgs.diff} @@ -577,6 +578,7 @@ def Tree.big (n : Nat) : Tree Nat := However, it can be difficult to spot where test failures come from when the output is large: ```lean (error := true) (name := bigMsg) +set_option guard_msgs.diff false /-- info: Tree.branches [Tree.branches @@ -600,7 +602,7 @@ Tree.branches Tree.branches [Tree.branches [Tree.val 0], Tree.val 0]]] ``` -while the {keywordOf Lean.guardMsgsCmd}`#guard_msgs` command reports this error: +Without {option}`guard_msgs.diff`, the {keywordOf Lean.guardMsgsCmd}`#guard_msgs` command reports this error: ```leanOutput bigMsg (severity := error) ❌️ Docstring on `#guard_msgs` does not match generated message: diff --git a/Manual/Intro.lean b/Manual/Intro.lean index 13290ee1..1ddd5d79 100644 --- a/Manual/Intro.lean +++ b/Manual/Intro.lean @@ -73,19 +73,19 @@ tag := "code-samples" This document contains many Lean code examples. They are formatted as follows: -````lean +```lean def hello : IO Unit := IO.println "Hello, world!" 
-```` +``` Compiler output (which may be errors, warnings, or just information) is shown both in the code and separately: -````lean (name := output) (error := true) +```lean (name := output) (error := true) #eval s!"The answer is {2 + 2}" theorem bogus : False := by sorry example := Nat.succ "two" -```` +``` Informative output, such as the result of {keywordOf Lean.Parser.Command.eval}`#eval`, is shown like this: ```leanOutput output (severity := information) @@ -199,6 +199,11 @@ inductive Even : Nat → Prop where :::: +# How to Cite This Work + +In formal citations, please cite this work as _The Lean Language Reference_ by The Lean Developers. +Additionally, please include the corresponding version of Lean in the citation, which is {versionString}[]. + # Open-Source Licenses %%% tag := "dependency-licenses" diff --git a/Manual/Language/InductiveTypes.lean b/Manual/Language/InductiveTypes.lean index ab9ca1a5..9fe843e3 100644 --- a/Manual/Language/InductiveTypes.lean +++ b/Manual/Language/InductiveTypes.lean @@ -366,16 +366,12 @@ tag := "inductive-types-runtime-special-support" Not every inductive type is represented as indicated here—some inductive types have special support from the Lean compiler: :::keepEnv -````lean (show := false) +```lean (show := false) axiom α : Prop -```` +``` * The representation of the fixed-width integer types {lean}`UInt8`, …, {lean}`UInt64`, {lean}`Int8`, …, {lean}`Int64`, and {lean}`USize` depends on the whether the code is compiled for a 32- or 64-bit architecture. - Fixed-width integer types that are strictly smaller than the architecture's pointer type are stored unboxed by setting the lowest bit of a pointer to `1`. - Integer types that are at least as large as the architecture's pointer type may be boxed or unboxed, depending on whether a concrete value fits in one fewer bits than the pointer type. - If so, it is encoded by setting the lowest bit of the value to `1` (checked by {c}`lean_is_scalar`). 
- Otherwise, the value is represented is a pointer to a fixed-size Lean object on the heap. - In the C FFI, these values are marshalled into the appropriate C types {c}`uint8_t`, …, {c}`uint64_t`, and {c}`size_t`, respectively.{margin}[Fixed-width signed integer types are also represented as unsigned C integers in the FFI.] + Their representation is described {ref "fixed-int-runtime"}[in a dedicated section]. * {lean}`Char` is represented by `uint32_t`. Because {lean}`Char` values never require more than 21 bits, they are always unboxed. @@ -386,7 +382,7 @@ axiom α : Prop * {lean}`Decidable α` is represented the same way as `Bool` {TODO}[Aren't Decidable and Bool just special cases of the rules for trivial constructors and irrelevance?] * {lean}`Nat` and {lean}`Int` are represented by {c}`lean_object *`. - A run-time {lean}`Nat` or {lean}`Int` value is either a pointer to an opaque arbitrary-precision integer object or, if the lowest bit of the “pointer” is `1` (checked by {c}`lean_is_scalar`), an encoded unboxed natural number or integer ({c}`lean_box`/{c}`lean_unbox`). {TODO}[Move these to FFI section or Nat chapter] + Their representations are described in more detail in {ref "nat-runtime"}[the section on natural numbers] and {ref "int-runtime"}[the section on integers]. ::: @@ -473,7 +469,7 @@ The memory order of the fields is derived from the types and order of the fields * Fields of type {lean}`USize` * Other scalar fields, in decreasing order by size -Within each group the fields are ordered in declaration order. **Warning**: Trivial wrapper types still count toward a field being treated as non-scalar for this purpose. +Within each group the fields are ordered in declaration order. *Warning*: Trivial wrapper types still count toward a field being treated as non-scalar for this purpose. * To access fields of the first kind, use {c}`lean_ctor_get(val, i)` to get the `i`th non-scalar field. 
* To access {lean}`USize` fields, use {c}`lean_ctor_get_usize(val, n+i)` to get the {c}`i`th `USize` field and {c}`n` is the total number of fields of the first kind. @@ -552,7 +548,9 @@ example : OddList String := .cons "x" (.cons "y" (.cons "z" .nil)) example : OddList String := .cons "x" (.cons "y" .nil) ``` ```leanOutput evenOddMut -invalid dotted identifier notation, unknown identifier `OddList.nil` from expected type +Unknown identifier `OddList.nil` + +Note: Inferred this identifier from the expected type of `.nil`: OddList String ``` ::: @@ -582,7 +580,9 @@ mutual inductive FreshList (α : Type) (r : α → α → Prop) : Type where | nil : FreshList α r | cons (x : α) (xs : FreshList α r) (fresh : Fresh r x xs) - inductive Fresh (r : α → FreshList α → Prop) : α → FreshList α r → Prop where + inductive Fresh + (r : α → FreshList α → Prop) : + α → FreshList α r → Prop where | nil : Fresh r x .nil | cons : r x y → (f : Fresh r x ys) → Fresh r x (.cons y ys f) end @@ -659,11 +659,16 @@ These mutually-inductive types are a somewhat complicated way to represent run-l mutual inductive RLE : List α → Type where | nil : RLE [] - | run (x : α) (n : Nat) : n ≠ 0 → PrefixRunOf n x xs ys → RLE ys → RLE xs + | run (x : α) (n : Nat) : + n ≠ 0 → PrefixRunOf n x xs ys → RLE ys → RLE xs inductive PrefixRunOf : Nat → α → List α → List α → Type where - | zero (noMore : ¬∃zs, xs = x :: zs := by simp) : PrefixRunOf 0 x xs xs - | succ : PrefixRunOf n x xs ys → PrefixRunOf (n + 1) x (x :: xs) ys + | zero + (noMore : ¬∃zs, xs = x :: zs := by simp) : + PrefixRunOf 0 x xs xs + | succ : + PrefixRunOf n x xs ys → + PrefixRunOf (n + 1) x (x :: xs) ys end example : RLE [1, 1, 2, 2, 3, 1, 1, 1] := @@ -682,11 +687,18 @@ Specifying {name}`PrefixRunOf` as a {lean}`Prop` would be sensible, but it canno mutual inductive RLE : List α → Type where | nil : RLE [] - | run (x : α) (n : Nat) : n ≠ 0 → PrefixRunOf n x xs ys → RLE ys → RLE xs + | run + (x : α) (n : Nat) : + n ≠ 0 → PrefixRunOf n x xs ys → 
RLE ys → + RLE xs inductive PrefixRunOf : Nat → α → List α → List α → Prop where - | zero (noMore : ¬∃zs, xs = x :: zs := by simp) : PrefixRunOf 0 x xs xs - | succ : PrefixRunOf n x xs ys → PrefixRunOf (n + 1) x (x :: xs) ys + | zero + (noMore : ¬∃zs, xs = x :: zs := by simp) : + PrefixRunOf 0 x xs xs + | succ : + PrefixRunOf n x xs ys → + PrefixRunOf (n + 1) x (x :: xs) ys end ``` ```leanOutput rleBad diff --git a/Manual/Language/InductiveTypes/LogicalModel.lean b/Manual/Language/InductiveTypes/LogicalModel.lean index b0c3e6f6..892b5ee7 100644 --- a/Manual/Language/InductiveTypes/LogicalModel.lean +++ b/Manual/Language/InductiveTypes/LogicalModel.lean @@ -38,7 +38,7 @@ tag := "recursor-types" The recursor takes the following parameters: : The inductive type's {tech}[parameters] - Because parameters are consistent, they can be abstracted over the entire recursor + Because parameters are consistent, they can be abstracted over the entire recursor. : The {deftech}_motive_ @@ -48,23 +48,23 @@ The recursor takes the following parameters: For each constructor, the recursor expects a function that satisfies the motive for an arbitrary application of the constructor. Each minor premise abstracts over all of the constructor's parameters. - If the constructor's parameter's type is the inductive type itself, then the case additionally takes a parameter whose type is the motive applied to that parameter's value—this will receive the result of recursively processing the recursive parameter. + If the constructor's parameter's type is the inductive type itself, then the minor premise additionally takes a parameter whose type is the motive applied to that parameter's value—this will receive the result of recursively processing the recursive parameter. : The {deftech}_major premise_, or target Finally, the recursor takes an instance of the type as an argument, along with any index values. -The result type of the recursor is the motive applied to these indices and the target. 
+The result type of the recursor is the motive applied to these indices and the major premise. ::: :::example "The recursor for {lean}`Bool`" {lean}`Bool`'s recursor {name}`Bool.rec` has the following parameters: * The motive computes a type in any universe, given a {lean}`Bool`. - * There are cases for both constructors, in which the motive is satisfied for both {lean}`false` and {lean}`true`. - * The target is some {lean}`Bool`. + * There are minor premises for both constructors, in which the motive is satisfied for both {lean}`false` and {lean}`true`. + * The major premise is some {lean}`Bool`. -The return type is the motive applied to the target. +The return type is the motive applied to the major premise. ```signature Bool.rec.{u} {motive : Bool → Sort u} @@ -82,15 +82,15 @@ Bool.rec.{u} {motive : Bool → Sort u} axiom α.{u} : Type u ``` - * The parameter {lean}`α` comes first, because the parameter and the cases need to refer to it + * The parameter {lean}`α` comes first, because the motive, minor premises, and major premise need to refer to it. * The motive computes a type in any universe, given a {lean}`List α`. There is no connection between the universe levels `u` and `v`. - * There are cases for both constructors: + * There are minor premises for both constructors: - The motive is satisfied for {name}`List.nil` - The motive should be satisfiable for any application of {name}`List.cons`, given that it is satisfiable for the tail. The extra parameter `motive tail` is because `tail`'s type is a recursive occurrence of {name}`List`. - * The target is some {lean}`List α`. + * The major premise is some {lean}`List α`. ::: -Once again, the return type is the motive applied to the target. +Once again, the return type is the motive applied to the major premise. 
```signature List.rec.{u, v} {α : Type v} {motive : List α → Sort u} @@ -114,9 +114,9 @@ inductive EvenOddList (α : Type u) : Bool → Type u where The recursor {name}`EvenOddList.rec` is very similar to that for `List`. The difference comes from the presence of the index: * The motive now abstracts over any arbitrary choice of index. - * The case for {name EvenOddList.nil}`nil` applies the motive to {name EvenOddList.nil}`nil`'s index value `true`. - * The case for {name EvenOddList.cons}`cons` abstracts over the index value used in its recursive occurrence, and instantiates the motive with its negation. - * The target additionally abstracts over an arbitrary choice of index. + * The minor premise for {name EvenOddList.nil}`nil` applies the motive to {name EvenOddList.nil}`nil`'s index value `true`. + * The minor premise {name EvenOddList.cons}`cons` abstracts over the index value used in its recursive occurrence, and instantiates the motive with its negation. + * The major premise additionally abstracts over an arbitrary choice of index. ```signature EvenOddList.rec.{u, v} {α : Type v} @@ -132,7 +132,7 @@ EvenOddList.rec.{u, v} {α : Type v} ::::: When using a predicate (that is, a function that returns a {lean}`Prop`) for the motive, recursors express induction. -The cases for non-recursive constructors are the base cases, and the additional arguments supplied to constructors with recursive arguments are the induction hypotheses. +The minor premises for non-recursive constructors are the base cases, and the additional arguments supplied to minor premises for constructors with recursive arguments are the induction hypotheses. ### Subsingleton Elimination %%% @@ -218,11 +218,11 @@ tag := "iota-reduction" In addition to adding new constants to the logic, inductive type declarations also add new reduction rules. -These rules govern the interaction between recursors and constructors; specifically recursors that have constructors as their targets. 
+These rules govern the interaction between recursors and constructors; specifically recursors that have constructors as their major premise. This form of reduction is called {deftech}_ι-reduction_ (iota reduction){index}[ι-reduction]{index (subterm:="ι (iota)")}[reduction]. -When the recursor's target is a constructor with no recursive parameters, the recursor application reduces to an application of the constructor's case to the constructor's arguments. -If there are recursive parameters, then these arguments to the case are found by applying the recursor to the recursive occurrence. +When the recursor's major premise is a constructor with no recursive parameters, the recursor application reduces to an application of the constructor's minor premise to the constructor's arguments. +If there are recursive parameters, then these arguments to the minor premise are found by applying the recursor to the recursive occurrence. # Well-Formedness Requirements %%% @@ -367,9 +367,9 @@ tag := "recursor-elaboration-helpers" In addition to the type constructor, constructors, and recursors that Lean's core type theory prescribes for inductive types, Lean constructs a number of useful helpers. First, the equation compiler (which translates recursive functions with pattern matching in to applications of recursors) makes use of these additional constructs: - * `recOn` is a version of the recursor in which the target is prior to the cases for each constructor. - * `casesOn` is a version of the recursor in which the target is prior to the cases for each constructor, and recursive arguments do not yield induction hypotheses. It expresses case analysis rather than primitive recursion. - * `below` computes a type that, for some motive, expresses that _all_ inhabitants of the inductive type that are subtrees of the target satisfy the motive. It transforms a motive for induction or primitive recursion into a motive for strong recursion or strong induction. 
+ * `recOn` is a version of the recursor in which the major premise is prior to the minor premise for each constructor. + * `casesOn` is a version of the recursor in which the major premise is prior to the minor premise for each constructor, and recursive arguments do not yield induction hypotheses. It expresses case analysis rather than primitive recursion. + * `below` computes a type that, for some motive, expresses that _all_ inhabitants of the inductive type that are subtrees of the major premise satisfy the motive. It transforms a motive for induction or primitive recursion into a motive for strong recursion or strong induction. * `brecOn` is a version of the recursor in which `below` is used to provide access to all subtrees, rather than just immediate recursive parameters. It represents strong induction. * `noConfusion` is a general statement from which injectivity and disjointness of constructors can be derived. * `noConfusionType` is the motive used for `noConfusion` that determines what the consequences of two constructors being equal would be. For separate constructors, this is {lean}`False`; if both constructors are the same, then the consequence is the equality of their respective parameters. diff --git a/Manual/Language/InductiveTypes/Structures.lean b/Manual/Language/InductiveTypes/Structures.lean index d74b6238..ed123342 100644 --- a/Manual/Language/InductiveTypes/Structures.lean +++ b/Manual/Language/InductiveTypes/Structures.lean @@ -501,9 +501,9 @@ Evaluating the first field index of {name}`coords` yields the underlying {name}` The elaborator translates {lean}`coords.fst` into {lean}`coords.toPair.fst`. 
-````lean (show := false) (keep := false) +```lean (show := false) (keep := false) example (t : Triple α) : t.fst = t.toPair.fst := rfl -```` +``` :::: :::: example "No structure subtyping" diff --git a/Manual/Meta/ElanCmd.lean b/Manual/Meta/ElanCmd.lean index 7b2dc4c8..993fa646 100644 --- a/Manual/Meta/ElanCmd.lean +++ b/Manual/Meta/ElanCmd.lean @@ -204,7 +204,7 @@ def elanMeta.descr : InlineDescr where pure <| .seq #[← go b, .raw "\n"] extraCss := [highlightingStyle] extraJs := [highlightingJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in @@ -295,7 +295,7 @@ def elanArgs.descr : InlineDescr where extraCss := [highlightingStyle] extraJs := [highlightingJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in diff --git a/Manual/Meta/ElanOpt.lean b/Manual/Meta/ElanOpt.lean index e3d4a795..fedf43d5 100644 --- a/Manual/Meta/ElanOpt.lean +++ b/Manual/Meta/ElanOpt.lean @@ -109,15 +109,15 @@ def elanOptDef.descr : InlineDescr where toHtml := open Verso.Output.Html in some <| fun goB id data content => do - let .arr #[.str name, _jsonKind, meta] := data + let .arr #[.str name, _jsonKind, metadata] := data | HtmlT.logError s!"Failed to deserialize metadata for Elan option def: {data}"; content.mapM goB let idAttr := (← read).traverseState.htmlId id - let .ok meta := FromJson.fromJson? (α := Option String) meta - | HtmlT.logError s!"Failed to deserialize argument metadata for Elan option def: {meta}"; content.mapM goB + let .ok metadata := FromJson.fromJson? 
(α := Option String) metadata + | HtmlT.logError s!"Failed to deserialize argument metadata for Elan option def: {metadata}"; content.mapM goB - if let some mv := meta then + if let some mv := metadata then pure {{{{name}}" "{{mv}}}} else pure {{{{name}}}} diff --git a/Manual/Meta/ErrorExplanation.lean b/Manual/Meta/ErrorExplanation.lean new file mode 100644 index 00000000..911def5f --- /dev/null +++ b/Manual/Meta/ErrorExplanation.lean @@ -0,0 +1,731 @@ +/- +Copyright (c) 2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author: Joseph Rotella +-/ + +import VersoManual + +import Manual.Meta + +import Lean.ErrorExplanations + +import PreprocessedExplanations + +open Lean Elab +open Verso.ArgParse +open Verso.Doc Elab +open Verso.Genre.Manual Markdown InlineLean +open SubVerso.Highlighting + +set_option pp.rawOnError true +set_option guard_msgs.diff true + +namespace Manual + +register_option manual.requireErrorExplanations : Bool := { + defValue := true, + descr := "Whether to fail or warn when error explanations don't match. Must be `true` for releases." +} + +/-- Loads the JSON data file for the preprocessed MWE code block `name`. -/ +def loadPreprocessedMWE (name : Name) (contents : String) + : MetaM (Highlighted × Array (MessageSeverity × String)) := do + let fileName : String := name.toString ++ ".json" + let path := preprocessedExplanationsRoot / fileName + unless (← System.FilePath.pathExists path) do + throwError m!"Did not find expected preprocessed code block file `{path}`. \ + Run `lake build error_explanations`." + let fileContents ← IO.FS.readFile path + let json ← ofExcept <| Json.parse fileContents + let hls ← ofExcept <| json.getObjVal? "highlighted" + >>= FromJson.fromJson? (α := Highlighted) + let messages ← ofExcept <| json.getObjVal? "messages" + >>= FromJson.fromJson? (α := Array (MessageSeverity × String)) + let fileHash ← ofExcept <| json.getObjVal? "hash" + >>= FromJson.fromJson? 
(α := UInt64) + let fileVersion ← ofExcept <| json.getObjVal? "version" >>= Json.getStr? + unless fileHash == hash contents && fileVersion == Lean.versionString do + throwError m!"Preprocessed code block data file `{path}` is out of date. \ + Run `lake build error_explanations`." + return (hls, messages) + +/-- +A modified version of `Verso.Genre.Manual.InlineLean.lean` for rendering an MWE +in an error explanation. +-/ +def explanationMWE : CodeBlockExpander + | args, str => Manual.withoutAsync <| do + let config ← LeanBlockConfig.parse.run args + + let some name := config.name + | throwError "Explanation MWE is missing a name" + let (hls, msgs) ← loadPreprocessedMWE name str.getString + saveOutputs name msgs.toList + + pure #[← ``(Block.other + (Block.lean $(quote hls) (some $(quote (← getFileName))) none) + #[Block.code $str])] + +/- +A tabbed container for MWEs in an error explanation example. Must satisfy the +invariant that `titles.size` is equal to the number of children of this block. +-/ +block_extension Block.tabbedMWEs (titles : Array String) where + data := toJson titles + traverse id data _blocks := do + let name := + match FromJson.fromJson? 
(α := Option String) data with + | .ok (some name) => name + | _ => "error-example" + discard <| externalTag id (← read).path name + pure none + toTeX := none + extraCss := [r#" +.error-example-container:not(:last-child) { + border-bottom: 1px solid gray; + padding-bottom: var(--verso--box-padding); +} +.error-example-tab-list [role="tab"] { + position: relative; + z-index: 1; + background: white; + border: 0; + padding: 0.2em; + cursor: pointer; +} +.error-example-tab-list [role="tab"]:not(:last-child) { + margin-right: 1rem; +} +.error-example-tab-list [role="tab"][aria-selected="true"] { + border-bottom: 1px solid gray; +} +/* this rule and the following ensure that all tabs are the same height */ +.error-example-tab-view { + display: flex; +} +.error-example-tabpanel { + margin-right: -100%; + width: 100%; + display: block; +} +.error-example-tabpanel.error-example-tabpanel-hidden { + visibility: hidden; +} +.error-example-tabpanel .hl.lean .token { + /* unset transition to avoid lag when switching panels */ + transition: visibility 0s; +} + "#] + extraJs := [r#" +window.addEventListener('DOMContentLoaded', () => { + const tabLists = document.querySelectorAll('.error-example-tab-list') + tabLists.forEach(tabList => { + const tabs = tabList.querySelectorAll(':scope > [role="tab"]') + + const setActiveTab = (e) => { + for (const tab of tabs) { + const controllee = document.getElementById(tab.getAttribute('aria-controls')) + if (tab === e.target) { + tab.setAttribute('aria-selected', true) + controllee.classList.remove('error-example-tabpanel-hidden') + } else { + tab.setAttribute('aria-selected', false) + controllee.classList.add('error-example-tabpanel-hidden') + } + } + } + + tabs.forEach(tab => { + tab.addEventListener('click', setActiveTab) + }) + + let focusedIdx = 0 + tabList.addEventListener('keydown', e => { + if (e.key === 'ArrowRight' || e.key === 'ArrowLeft') { + tabs[focusedIdx].setAttribute('tabindex', -1) + focusedIdx = + e.key === 'ArrowRight' + ? 
(focusedIdx + 1) % tabs.length + : (focusedIdx - 1 + tabs.length) % tabs.length + tabs[focusedIdx].setAttribute('tabindex', 0) + tabs[focusedIdx].focus() + } + }) + }) +}) + "#] + toHtml := some fun _goI goB id info contents => + open Verso.Doc.Html in + open Verso.Output Html in do + let .ok titles := FromJson.fromJson? (α := Array String) info + | HtmlT.logError "Invalid titles JSON for example block" + pure .empty + unless titles.size == contents.size do + HtmlT.logError s!"Mismatched number of titles and contents for example block: \ + Found {contents.size} tab panels but {titles.size} titles." + return .empty + let some { htmlId, .. } := (← HtmlT.state).externalTags[id]? + | HtmlT.logError "Could not find tag for error example" + pure .empty + let buttons ← titles.mapIdxM fun i (title : String) => do + let (tabIndex, selected) := if i == 0 then ("0", "true") else ("-1", "false") + let idxStr := toString i + return {{ + + }} + let panels ← contents.mapIdxM fun i b => do + let className := "error-example-tabpanel" ++ if i == 0 then "" else " error-example-tabpanel-hidden" + let idxStr := toString i + return {{ +
+ {{ ← goB b }} +
+ }} + pure {{ +
+
+ {{buttons}} +
+
+ {{panels}} +
+
+ }} + +/-- +Given the name of the explanation in which it occurs and its index among all +code blocks therein, generates a name for a code block in an error explanation. +This is used for output tracking and to locate its corresponding JSON file. +-/ +private def mkExampleName (errorName : Name) (idx : Nat) : Name := + errorName ++ s!"block{idx}".toName + +structure ExplanCodeElabM.Context where + name : Name + +structure ExplanCodeElabM.State where + codeBlockIdx : Nat + +/-- +The monad in which code blocks within an error explanation are elaborated. +-/ +abbrev ExplanCodeElabM := + ReaderT ExplanCodeElabM.Context (StateT ExplanCodeElabM.State DocElabM) + +/-- +Attempts to elaborate block code in an error explanation: Lean (and unlabeled) +blocks should have a corresponding preprocessing cache file, output blocks are +checked against their corresponding Lean block's output, and all other code +blocks are rendered using the default Verso code element. +-/ +def tryElabErrorExplanationCodeBlock (errorName : Name) (errorSev : MessageSeverity) + (info? _lang : Option String) (str : String) : ExplanCodeElabM Term := do + if let some info := info? then + let { lang, kind?, .. } ← match ErrorExplanation.CodeInfo.parse info with + | .ok x => pure x + | .error e => throwError e + if lang == "output" then + let codeBlockIdx := (← get).codeBlockIdx - 1 + let name := mkExampleName errorName codeBlockIdx + let args := #[(← `(argument| $(mkIdent name):ident))] + let parsedArgs ← parseArgs args + let blocks ← try + withFreshMacroScope <| leanOutput parsedArgs (quote str) + catch + | .error ref msg => + let kindStr := kind?.map (s!" 
({·} example)") |>.getD "" + -- Log rather than throw so we can detect all invalid outputs in a + -- single build + let logFailure := + if manual.requireErrorExplanations.get (← getOptions) then logErrorAt + else logWarningAt + logFailure ref m!"Invalid output for {(← read).name} code block \ + #{codeBlockIdx}{kindStr}: {msg}" + pure #[← ``(Verso.Doc.Block.code "")] + | e@(.internal ..) => throw e + return (← ``(Verso.Doc.Block.concat #[$blocks,*])) + else if lang == "" || lang == "lean" then + let mut args := #[] + let name := mkExampleName errorName (← get).codeBlockIdx + args := args.push (← `(argument| name := $(mkIdent name):ident)) + if let some kind := kind? then + let errorVal ← if kind == .broken && errorSev == .error then + `(arg_val|true) + else + `(arg_val|false) + args := args.push (← `(argument| error := $errorVal)) + let parsedArgs ← parseArgs args + let blocks ← withFreshMacroScope <| explanationMWE parsedArgs (quote str) + modify fun s => { s with codeBlockIdx := s.codeBlockIdx + 1 } + return (← ``(Verso.Doc.Block.concat #[$blocks,*])) + -- If this isn't labeled as an MWE, fall back on a basic code block + ``(Verso.Doc.Block.code $(quote str)) + +/-- The code contents of an example, not including any subsequent description. -/ +private structure ExampleContents where + title : Array Term + codeBlocks : Array (Term × Option String) + descrBlocks : Array Term + +structure ExplanElabM.Context where + /-- The blocks in the error explanation to elaborate. -/ + blocks : Array MD4Lean.Block + /-- Name of the error described by the explanation being elaborated. -/ + name : Name + /-- Severity of error described by the explanation being elaborated. -/ + severity : MessageSeverity + +structure ExplanElabM.State where + /-- The index of the next block in the context's `blocks` to elaborate. 
-/ + blockIdx : Nat := 0 + /-- Active Markdown header levels that can be closed by subsequent Markdown -/ + levels : List (Nat × Nat) := [] + /-- The index of the current code block within this explanation. -/ + codeBlockIdx : Nat := 0 + +/-- The monad in which error explanations are elaborated. -/ +abbrev ExplanElabM := ReaderT ExplanElabM.Context (StateT ExplanElabM.State PartElabM) + +def ExplanElabM.run (x : ExplanElabM α) (name : Name) + (severity : MessageSeverity) (blocks : Array MD4Lean.Block) : + PartElabM (α × ExplanElabM.State) := + ReaderT.run x { name, blocks, severity } |>.run {} + +def ExplanElabM.nextBlock? : ExplanElabM (Option MD4Lean.Block) := do + let curBlockIdx := (← get).blockIdx + let blocks := (← read).blocks + if h : curBlockIdx ≥ blocks.size then + return none + else + modify fun s => { s with blockIdx := s.blockIdx + 1 } + return blocks[curBlockIdx] + +def ExplanElabM.backtrack : ExplanElabM Unit := do + modify fun s => { s with blockIdx := s.blockIdx - 1 } + +def ExplanElabM.liftExplanCodeElabM (x : ExplanCodeElabM α) : ExplanElabM α := do + let { codeBlockIdx, .. } ← get + let { name, .. } ← read + let (res, st) ← x.run { name } { codeBlockIdx } + modify fun s => { s with codeBlockIdx := st.codeBlockIdx } + return res + +instance : MonadLift ExplanCodeElabM ExplanElabM where + monadLift := ExplanElabM.liftExplanCodeElabM + +/-- +Elaborates inline code in strict mode, restoring the state afterward. + +We have to do state restoration after each inline elaboration because the block +elaborator needs to have its `TermElabM` state changes persisted, as the part +elaborator modifies this state during elaboration. +-/ +private def tryElabInlineCodeStrictRestoringState + (tactics : Array Tactic.Doc.TacticDoc) (keywords : Array String) + (prevWord? : Option String) (str : String) : ExplanElabM Term := do + let b ← (saveState : TermElabM _) + try + let t ← tryElabInlineCodeStrict tactics keywords prevWord? 
str + Term.synthesizeSyntheticMVarsUsingDefault + pure t + finally + b.restore + +/-- Returns a Verso term corresponding to `b`. -/ +def blockFromExplanationMarkdown (b : MD4Lean.Block) : ExplanElabM Term := do + let { name, severity .. } ← read + let tactics ← Elab.Tactic.Doc.allTacticDocs + let keywords := tactics.map (·.userName) + let ref ← getRef + blockFromMarkdown b + (handleHeaders := Markdown.strongEmphHeaders) + (elabInlineCode := some (tryElabInlineCodeStrictRestoringState tactics keywords)) + (elabBlockCode := some fun i l s => withRef ref <| + tryElabErrorExplanationCodeBlock name severity i l s) + +/-- Add block(s) corresponding to `b` to the current document part. -/ +def addPartFromExplanationMarkdown (b : MD4Lean.Block) : ExplanElabM Unit := do + let tactics ← Elab.Tactic.Doc.allTacticDocs + let keywords := tactics.map (·.userName) + let ref ← getRef + let {name, severity .. } ← read + let ls ← addPartFromMarkdown b + (handleHeaders := Markdown.strongEmphHeaders) + (elabInlineCode := some (tryElabInlineCodeStrictRestoringState tactics keywords)) + (elabBlockCode := some fun i l s => withRef ref <| + tryElabErrorExplanationCodeBlock name severity i l s) + modifyThe ExplanElabM.State ({ · with levels := ls }) + +/-- Extracts and parses the info string of a code block. -/ +private def infoOfCodeBlock : MD4Lean.Block → Except String ErrorExplanation.CodeInfo + | .code info _ _ _ => do + let txt ← attr' info + ErrorExplanation.CodeInfo.parse txt + | el => .error s!"Cannot get code block info from non-code block element:\n{repr el}" + +/-- +Returns `true` if `b` is a block with language `expLang` and, if +`expKind? = some expKind`, kind `expKind`. +-/ +private def blockHasExplanationCodeInfo + (b : MD4Lean.Block) (expLang : String) + (expKind? : Option ErrorExplanation.CodeInfo.Kind := none) + : DocElabM Bool := do + let { kind?, lang, .. 
} ← match infoOfCodeBlock b with + | .ok x => pure x + | .error _ => return false + let optMatch {α : Type} [BEq α] (expected? : Option α) (actual? : Option α) := + if let some expected := expected? then + some expected == actual? + else + true + return lang == expLang && optMatch expKind? kind? + +/-- Throws an error if `b` is not a code block with language `expLang` and kind `expKind`. -/ +private def expectExplanationCodeInfo + (b : MD4Lean.Block) (expLang : String) (expKind : ErrorExplanation.CodeInfo.Kind) + : DocElabM Unit := do + let { kind?, lang, .. } ← match infoOfCodeBlock b with + | .ok x => pure x + | .error e => throwError e + unless lang == expLang do + throwError "Expected a code block with language `{expLang}`, but found `{lang}`" + unless kind? == some expKind do + let str := kind?.map toString |>.getD "unspecified" + throwError "Expected a code block of kind `{expKind}`, but found `{str}`" + +/-- Returns `true` if `txt` is the "Examples" header text. -/ +private def isExamplesHeaderText (txt : Array MD4Lean.Text) : Bool := + if _ : txt.size = 1 then + match txt[0] with + | .normal str => str.trim == "Examples" + | _ => false + else false + +/-- Convert the accumulated contents of an example into a Verso block term. -/ +private def makeExample (contents : ExampleContents) : DocElabM Term := do + let {title, codeBlocks, descrBlocks } := contents + let titles := codeBlocks.mapIdx fun i (_, title?) => + let fallback := + if i == 0 then + "Original" + else if codeBlocks.size == 2 then + "Fixed" + else + s!"Fixed {i}" + title?.getD fallback + let codeBlocks := codeBlocks.map Prod.fst + let codeExample ← + ``(Block.other (Block.tabbedMWEs $(quote titles)) #[$codeBlocks,*]) + ``(Block.other (Block.example none (opened := true)) + #[Block.para #[$title,*], $codeExample, $descrBlocks,*]) + +private def titleOfCodeBlock? (b : MD4Lean.Block) : Option String := do + let info ← infoOfCodeBlock b |>.toOption + info.title? 
+ +/-- Closes the last-opened section, throwing an error on failure. -/ +def closeEnclosingSection : PartElabM Unit := do + -- We use `default` as the source position because the Markdown doesn't have one + if let some ctxt' := (← getThe PartElabM.State).partContext.close default then + modifyThe PartElabM.State fun st => {st with partContext := ctxt'} + else + throwError m!"Failed to close the last-opened explanation part" + +/-- Adds explanation blocks until the "Examples" header is reached. -/ +def addNonExampleBlocks : ExplanElabM Unit := do + repeat + let some block ← ExplanElabM.nextBlock? + | return + if let MD4Lean.Block.header 1 txt := block then + if isExamplesHeaderText txt then + addPartFromExplanationMarkdown block + break + addPartFromExplanationMarkdown block + +/-- +Get the next code block if it's a broken Lean block along with its title. + +Note that this function errors on failure, since we never backtrack if a broken +code block is missing, and doing so allows us to provide more granular error +messages. +-/ +def getBrokenTermAndTitle : ExplanElabM (Term × Option String) := do + let some brokenBlock ← ExplanElabM.nextBlock? + | throwError "Found a header for a new example, but no following `broken` code block" + -- We don't bother backtracking here since we can't recover + expectExplanationCodeInfo brokenBlock "lean" .broken + let brokenTerm ← blockFromExplanationMarkdown brokenBlock + let title? := titleOfCodeBlock? brokenBlock + return (brokenTerm, title?) + +/-- Execute `x` until it returns `none`. -/ +partial def repeatedly (x : ExplanElabM (Option α)) : ExplanElabM (Array α) := + go x #[] +where + go x acc := do + if let some result := (← x) then + go x (acc.push result) + else + return acc + +/-- Get the next block if it is an output code block. -/ +def getOutputTerm? : ExplanElabM (Option Term) := do + let some block ← ExplanElabM.nextBlock? 
+ | return none + if (← blockHasExplanationCodeInfo block "output") then + return some (← blockFromExplanationMarkdown block) + else + ExplanElabM.backtrack + return none + +/-- Get the next code block if it is a fixed Lean block, and, if so, its title if it has one. -/ +def getFixedTermAndTitle? : ExplanElabM (Option (Term × Option String)) := do + let some block ← ExplanElabM.nextBlock? + | return none + if (← blockHasExplanationCodeInfo block "lean" (some .fixed)) then + let title? := titleOfCodeBlock? block + return some (← blockFromExplanationMarkdown block, title?) + else + ExplanElabM.backtrack + return none + +/-- Get the next block(s) if they are a fixed code block with zero or more outputs. -/ +def getFixedTermAndOutputs? : ExplanElabM (Option (Term × Array Term × Option String)) := do + let some (fixedTerm, fixedTitle?) ← getFixedTermAndTitle? | return none + let outputs ← repeatedly getOutputTerm? + return some (fixedTerm, outputs, fixedTitle?) + +/-- Get the next block to elaborate if it's not an example-terminating header. -/ +def getExampleDescriptionTerm? : ExplanElabM (Option Term) := do + let some block ← ExplanElabM.nextBlock? + | return none + if block matches .header 1 _ | .header 2 _ then + ExplanElabM.backtrack + return none + else + return some (← blockFromExplanationMarkdown block) + +/-- +Add blocks corresponding to the "Examples" section of the explanation. Assumes +that the "Examples" header itself has already been added, and will repeatedly +add examples beginning with a level-2 header, followed by broken and fixed code +blocks with outputs, and descriptions thereof. +-/ +def addExampleBlocks : ExplanElabM Unit := do + repeat + let some block@(.header 2 titleTexts) ← ExplanElabM.nextBlock? 
| return + let `(Verso.Doc.Block.other #[$titleStxs,*]) ← blockFromMarkdown block + [fun (stxs : Array Term) => ``(Verso.Doc.Block.other #[$stxs,*])] + | throwError "Unexpected output when elaborating example header" + let title := titleStxs.getElems + let titleStr := String.join + (titleTexts.mapM stringFromMarkdownText |>.toOption.getD #[]).toList + + -- Broken code and output(s) + let (brokenCodeTerm, brokenTitle?) ← getBrokenTermAndTitle + let brokenOutputTerms ← repeatedly getOutputTerm? + if brokenOutputTerms.isEmpty then + throwError m!"Missing output for broken code snippet in example '{titleStr}'" + let brokenWithTitle := + (← ``(Block.concat #[$brokenCodeTerm, $brokenOutputTerms,*]), brokenTitle?) + + -- Fixed version(s) with optional output(s) + let fixedTermsAndOutputs ← repeatedly getFixedTermAndOutputs? + if fixedTermsAndOutputs.isEmpty then + throwError m!"Found a `broken` code block but no following `fixed` code block for example '{titleStr}'" + let fixedWithTitles ← fixedTermsAndOutputs.mapM fun (code, outs, title?) => + return (← ``(Block.concat #[$code, $outs,*]), title?) + + -- Arbitrary description of above code blocks + let exampleDescrs ← repeatedly getExampleDescriptionTerm? + let exampleInfo : ExampleContents := { + title + codeBlocks := #[brokenWithTitle] ++ fixedWithTitles + descrBlocks := exampleDescrs + } + let ex ← makeExample exampleInfo + PartElabM.addBlock ex + +/-- +Adds blocks constituting the explanation body to the document. The topmost +routine for rendering an explanation in `ExplanElabM`. 
+-/ +def addExplanationBodyBlocks : ExplanElabM Unit := do + addNonExampleBlocks + addExampleBlocks + +deriving instance Quote for ErrorExplanation.Metadata + +block_extension Block.errorExplanationMetadata (metadata : ErrorExplanation.Metadata) where + data := toJson metadata + traverse _ _ _ := pure none + toTeX := none + extraCss := [" + .error-explanation-metadata { + margin-bottom: 2rem; /* Double the paragraph margin */ + } + + .error-explanation-metadatum:not(:last-child):after { + content: '|'; + margin: 0 10px; + } + .error-explanation-removed-warning { + border: 1px solid var(--verso-warning-color); + border-radius: 0.5rem; + padding-left: var(--verso--box-padding); + padding-right: var(--verso--box-padding); + } + "] + toHtml := some fun _goI _goB _id info _contents => + open Verso.Doc.Html in + open Verso.Output Html in do + let .ok metadata := FromJson.fromJson? (α := ErrorExplanation.Metadata) info + | HtmlT.logError "Failed to parse info for error explanation metadata block:\n{metadata}" + pure .empty + let deprecatedWarning := + if metadata.removedVersion?.isSome then + {{
+

"Note: " "This diagnostic is no longer produced."

+
}} + else + .empty + let sevText := if metadata.severity matches .warning then "Warning" else "Error" + let entries := #[("Severity", sevText), ("Since", metadata.sinceVersion)] + ++ (metadata.removedVersion?.map fun v => #[("Removed", v)]).getD #[] + let entries := entries.map fun (label, data) => + {{ + {{Html.text true label}}": " + {{Html.text true data}} + }} + return {{ + + }} + +/-- Adds the specified explanation metadata to the document. -/ +def addExplanationMetadata (metadata : ErrorExplanation.Metadata) : PartElabM Unit := do + PartElabM.addBlock (← ``(Block.other (Block.errorExplanationMetadata $(quote metadata)) #[])) + +/-- Adds the metadata and bofy of the explanation with name `name` to the document. -/ +def addExplanationBlocksFor (name : Name) : PartElabM Unit := do + let explan? ← getErrorExplanation? name + match explan? with + | .none => + throwError m!"Adding explanation blocks failed: Could not find explanation for {name}" + | some explan => + try + let some ast := MD4Lean.parse explan.doc + | throwErrorAt (← getRef) "Failed to parse docstring as Markdown" + addExplanationMetadata explan.metadata + let (_, { levels, .. }) ← addExplanationBodyBlocks.run name explan.metadata.severity ast.blocks + for _ in levels do + closeEnclosingSection + catch + | .error ref msg => throw <| .error ref m!"Failed to process explanation for {name}: {msg}" + | e => throw e + +def errorExplanationDomain := `Manual.errorExplanation + +inline_extension Inline.errorExplanation (errorName : Name) (summary : String) where + data := toJson #[errorName.toString, summary] + init st := st + |>.setDomainTitle errorExplanationDomain "Error Explanations" + |>.setDomainDescription errorExplanationDomain + "Explanations of error messages and warnings produced during compilation" + + traverse id info _ := do + let .ok #[errorName, summary] := FromJson.fromJson? 
(α := Array String) info + | logError s!"Invalid JSON for error explanation:\n{info}"; pure none + modify fun s => + s |>.saveDomainObject errorExplanationDomain errorName id + |>.saveDomainObjectData errorExplanationDomain errorName (json%{"summary": $summary}) + let path ← (·.path) <$> read + discard <| Verso.Genre.Manual.externalTag id path errorName + pure none + + toTeX := none + toHtml := some fun go id _info contents => + open Verso.Doc.Html in + open Verso.Output Html in do + let xref ← HtmlT.state + let idAttr := xref.htmlId id + return {{ + + {{← contents.mapM go}} + + }} + +/-- Configuration for an `explanation` block. -/ +structure ExplanationConfig where + name : Ident + +def ExplanationConfig.parser [Monad m] [MonadError m] : ArgParse m ExplanationConfig := + ExplanationConfig.mk <$> .positional `name { + description := "name of error whose explanation to display", + get := fun + | .name x => pure x + | other => throwError "Expected error name, got {repr other}" + } + +/-- Renders the error explanation for `name` via `{explanation name}`. -/ +@[part_command Verso.Syntax.block_role] +def explanation : PartCommand + | `(block|block_role{explanation $args*}) => do + let config ← ExplanationConfig.parser.run (← parseArgs args) + addExplanationBlocksFor config.name.getId + | _ => Lean.Elab.throwUnsupportedSyntax + +/-- +Returns the suffix of `name` as a string containing soft-hyphen characters at reasonable split points. 
+-/ +def getBreakableSuffix (name : Name) : Option String := do + let suffix ← match name with + | .str _ s => s + | .num _ n => toString n + | .anonymous => none + let breakableHtml := softHyphenateIdentifiers.rwText (.text false suffix) + htmlText breakableHtml +where + htmlText : Verso.Output.Html → String + | .text _ txt => txt + | .seq elts => elts.foldl (· ++ htmlText ·) "" + | .tag _nm _attrs children => htmlText children + +open Verso Doc Elab ArgParse in +open Lean in +/-- Renders all error explanations as parts of the current page. -/ +@[part_command Verso.Syntax.block_role] +def make_explanations : PartCommand + | `(block|block_role{make_explanations}) => do + let explans ← getErrorExplanationsSorted + for (name, explan) in explans do + let titleString := name.toString + let titleBits := #[← ``(Inline.other + (Inline.errorExplanation $(quote name) $(quote explan.metadata.summary)) + #[Inline.code $(quote titleString)])] + let some shortTitleString := getBreakableSuffix name + | throwError m!"Found invalid explanation name `{name}` when generating explanations section" + PartElabM.push { + titleSyntax := quote (k := `str) titleString, + expandedTitle := some (titleString, titleBits), + metadata := some (← `({ shortTitle := $(quote shortTitleString) })), + blocks := #[], + priorParts := #[] + } + addExplanationBlocksFor name + closeEnclosingSection + | _ => throwUnsupportedSyntax diff --git a/Manual/Meta/Example.lean b/Manual/Meta/Example.lean index 10ec229f..52711698 100644 --- a/Manual/Meta/Example.lean +++ b/Manual/Meta/Example.lean @@ -16,19 +16,23 @@ open Lean Elab namespace Manual -def Block.example (name : Option String) : Block where +def Block.example (name : Option String) (opened : Bool) : Block where name := `Manual.example - data := ToJson.toJson (name, (none : Option Tag)) + data := ToJson.toJson (name, opened, (none : Option Tag)) structure ExampleConfig where description : FileMap × TSyntaxArray `inline /-- Name for refs -/ tag : Option 
String := none keep : Bool := false + opened : Bool := false def ExampleConfig.parse [Monad m] [MonadInfoTree m] [MonadLiftT CoreM m] [MonadEnv m] [MonadError m] [MonadFileMap m] : ArgParse m ExampleConfig := - ExampleConfig.mk <$> .positional `description .inlinesString <*> .named `tag .string true <*> (.named `keep .bool true <&> (·.getD false)) + ExampleConfig.mk <$> .positional `description .inlinesString + <*> .named `tag .string true + <*> (.named `keep .bool true <&> (·.getD false)) + <*> (.named `open .bool true <&> (·.getD false)) def prioritizedElab [Monad m] (prioritize : α → m Bool) (act : α → m β) (xs : Array α) : m (Array β) := do let mut out := #[] @@ -70,19 +74,20 @@ def «example» : DirectiveExpander withoutModifyingEnv <| prioritizedElab (isLeanBlock ·) elabBlock contents -- Examples are represented using the first block to hold the description. Storing it in the JSON -- entails repeated (de)serialization. - pure #[← ``(Block.other (Block.example $(quote cfg.tag)) #[Block.para #[$description,*], $blocks,*])] + pure #[← ``(Block.other (Block.example $(quote cfg.tag) (opened := $(quote cfg.opened))) + #[Block.para #[$description,*], $blocks,*])] @[block_extension «example»] def example.descr : BlockDescr where traverse id data contents := do - match FromJson.fromJson? data (α := Option String × Option Tag) with + match FromJson.fromJson? 
data (α := Option String × Bool × Option Tag) with | .error e => logError s!"Error deserializing example tag: {e}"; pure none - | .ok (none, _) => pure none - | .ok (some x, none) => + | .ok (none, _, _) => pure none + | .ok (some x, opened, none) => let path ← (·.path) <$> read let tag ← Verso.Genre.Manual.externalTag id path x - pure <| some <| Block.other {Block.example none with id := some id, data := toJson (some x, some tag)} contents - | .ok (some _, some _) => pure none + pure <| some <| Block.other {Block.example none false with id := some id, data := toJson (some x, opened, some tag)} contents + | .ok (some _, _, some _) => pure none toTeX := some <| fun _ go _ _ content => do pure <| .seq <| ← content.mapM fun b => do @@ -90,15 +95,21 @@ def example.descr : BlockDescr where toHtml := open Verso.Doc.Html in open Verso.Output.Html in - some <| fun goI goB id _data blocks => do + some <| fun goI goB id data blocks => do if h : blocks.size < 1 then HtmlT.logError "Malformed example" pure .empty else let .para description := blocks[0] | HtmlT.logError "Malformed example - description not paragraph"; pure .empty + let opened ← + match FromJson.fromJson? data (α := Option String × Bool × Option Tag) with + | .error e => HtmlT.logError s!"Error deserializing example data: {e}"; pure false + | .ok (_, opened, _) => pure opened let xref ← HtmlT.state - let attrs := xref.htmlId id + let mut attrs := xref.htmlId id + if opened then + attrs := attrs.push ("open", "") pure {{
{{← description.mapM goI}} diff --git a/Manual/Meta/LakeCmd.lean b/Manual/Meta/LakeCmd.lean index f68cb6ad..8718eaf0 100644 --- a/Manual/Meta/LakeCmd.lean +++ b/Manual/Meta/LakeCmd.lean @@ -317,7 +317,7 @@ def lakeMeta.descr : InlineDescr where pure <| .seq #[← go b, .raw "\n"] extraCss := [highlightingStyle] extraJs := [highlightingJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in @@ -408,7 +408,7 @@ def lakeArgs.descr : InlineDescr where extraCss := [highlightingStyle] extraJs := [highlightingJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in diff --git a/Manual/Meta/LakeOpt.lean b/Manual/Meta/LakeOpt.lean index 53cb4041..7afa73a7 100644 --- a/Manual/Meta/LakeOpt.lean +++ b/Manual/Meta/LakeOpt.lean @@ -107,15 +107,15 @@ def lakeOptDef.descr : InlineDescr where toHtml := open Verso.Output.Html in some <| fun goB id data content => do - let .arr #[.str name, _jsonKind, meta] := data + let .arr #[.str name, _jsonKind, metadata] := data | HtmlT.logError s!"Failed to deserialize metadata for Lake option def: {data}"; content.mapM goB let idAttr := (← read).traverseState.htmlId id - let .ok meta := FromJson.fromJson? (α := Option String) meta - | HtmlT.logError s!"Failed to deserialize argument metadata for Lake option def: {meta}"; content.mapM goB + let .ok metadata := FromJson.fromJson? 
(α := Option String) metadata + | HtmlT.logError s!"Failed to deserialize argument metadata for Lake option def: {metadata}"; content.mapM goB - if let some mv := meta then + if let some mv := metadata then pure {{{{name}}"="{{mv}}}} else pure {{{{name}}}} @@ -149,7 +149,7 @@ def lakeOpt.descr : InlineDescr where toHtml := open Verso.Output.Html in - some <| fun goB _id data content => do + some <| fun goB _ data content => do let .arr #[.str name, .str original] := data | HtmlT.logError s!"Failed to deserialize metadata for Lake option ref: {data}"; content.mapM goB diff --git a/Manual/Meta/LakeToml.lean b/Manual/Meta/LakeToml.lean index e4f0f482..6c10fa1b 100644 --- a/Manual/Meta/LakeToml.lean +++ b/Manual/Meta/LakeToml.lean @@ -470,7 +470,7 @@ def asTable (humanName : String) (n : Name) (skip : List Name := []) : DocElabM else if type.isConstOf ``Lake.BuildType then some (.oneOf buildTypes) else if type.isConstOf ``Lake.StdVer then some .version else if type.isConstOf ``Lake.StrPat then some (.other ``Lake.StrPat "string pattern" none) - else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Lake.LeanOption then some (.array (.other ``Lake.LeanOption "Lean option" none)) + else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Lean.LeanOption then some (.array (.other ``Lean.LeanOption "Lean option" none)) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``String then some (.array .string) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``Name then some (.array .string) else if type.isAppOfArity ``Array 1 && (type.getArg! 0).isConstOf ``System.FilePath then some (.array .path) @@ -632,13 +632,32 @@ instance : Test (Lake.ConfigType kind pkg name) where | .anonymous => fun (x : Lake.OpaqueTargetConfig pkg name) => Test.toString x | _ => fun _ => "Impossible!" 
+instance : Test Lake.CacheRef where + toString _ := "#" + +private def contains (fmt : Format) (c : Char) : Bool := + match fmt with + | .text s => s.contains c + | .tag _ x | .group x .. | .nest _ x => contains x c + | .append x y => contains x c || contains y c + | .align .. | .line | .nil => false + +instance [Test α] : Test (Option α) where + toString + | none => "none" + | some x => + let s := Test.toString x + let s := if contains s '(' || contains s ' ' then "(" ++ s ++ ")" else s + s!"some " ++ s deriving instance Test for Lake.ConfigDecl deriving instance Test for Lake.PConfigDecl deriving instance Test for Lake.NConfigDecl + deriving instance Test for Lake.Package + open Lake Toml in def report [Monad m] [Lean.MonadLog m] [MonadFileMap m] [Test α] (val : α) (errs : Array DecodeError) : m String := do let mut result := "" diff --git a/Manual/Meta/LexedText.lean b/Manual/Meta/LexedText.lean index dd8eddde..593a48c8 100644 --- a/Manual/Meta/LexedText.lean +++ b/Manual/Meta/LexedText.lean @@ -138,8 +138,9 @@ def lexedText := () def c : CodeBlockExpander | args, str => do ArgParse.done.run args - let toks ← LexedText.highlight hlC str.getString - pure #[← ``(Block.other (Block.c $(quote toks)) #[])] + let codeStr := str.getString + let toks ← LexedText.highlight hlC codeStr + pure #[← ``(Block.other (Block.c $(quote toks)) #[Block.code $(quote codeStr)])] open Verso.Output Html in open Verso.Doc.Html in @@ -173,5 +174,6 @@ def cInline : RoleExpander | throwError "Expected exactly one parameter" let `(inline|code($str)) := x | throwError "Expected exactly one code item" - let toks ← LexedText.highlight hlC str.getString - pure #[← ``(Inline.other (Inline.c $(quote toks)) #[])] + let codeStr := str.getString + let toks ← LexedText.highlight hlC codeStr + pure #[← ``(Inline.other (Inline.c $(quote toks)) #[Inline.code $(quote codeStr)])] diff --git a/Manual/Meta/Monotonicity.lean b/Manual/Meta/Monotonicity.lean index c0997039..b852d950 100644 --- 
a/Manual/Meta/Monotonicity.lean +++ b/Manual/Meta/Monotonicity.lean @@ -36,15 +36,62 @@ private def mkInlineTable (rows : Array (Array Term)) (tag : Option String := no let blocks : Array Term := #[ ← ``(Inline.text "Theorem"), ← ``(Inline.text "Pattern") ] ++ rows.flatten + + -- The new compiler has a stack overflow when compiling the table unless we split it up. This + -- section is an elaborator to get good control over which parts of the table end up in their + -- own defs. + let arr1 ← mkFreshUserName `monoBlocks1 + let arr2 ← mkFreshUserName `monoBlocks2 + let blockName ← mkFreshUserName `block + + let blockType : Expr := .app (.const ``Doc.Block []) (.const ``Verso.Genre.Manual []) + let listItemBlockType : Expr := .app (.const ``ListItem [0]) blockType + let inlineType : Expr := .app (.const ``Doc.Inline []) (.const ``Verso.Genre.Manual []) + let listItemInlineType : Expr := .app (.const ``ListItem [0]) inlineType + let arrListItemBlockType : Expr := .app (.const ``Array [0]) listItemBlockType + + let elabCell (blk : Syntax) : TermElabM Expr := do + let blk ← Term.elabTerm blk (some inlineType) + let blk := mkApp2 (.const ``Block.para []) (.const ``Verso.Genre.Manual []) (← mkArrayLit inlineType [blk]) + let blk := mkApp2 (.const ``ListItem.mk [0]) blockType (← mkArrayLit blockType [blk]) + Term.synthesizeSyntheticMVarsNoPostponing + instantiateMVars blk + + let blks1 ← blocks.take 70 |>.mapM elabCell + let blks2 ← blocks.drop 70 |>.mapM elabCell + + let v1 ← mkArrayLit listItemBlockType blks1.toList + addAndCompile <| .defnDecl { + name := arr1, levelParams := [], type := arrListItemBlockType, value := v1, hints := .opaque, safety := .safe + } + + let v1' ← mkArrayLit listItemBlockType blks2.toList + addAndCompile <| .defnDecl { + name := arr2, levelParams := [], type := arrListItemBlockType, value := v1', hints := .opaque, safety := .safe + } + -- The tag down here is relying on the coercion from `String` to `Tag` - ``(Block.other (Block.table $(quote 
columns) (header := true) Option.none Option.none (tag := $(quote tag))) - #[Block.ul #[$[Verso.Doc.ListItem.mk #[Block.para #[$blocks]]],*]]) + let stx ← ``(Block.other (Block.table $(quote columns) (header := true) Option.none Option.none (tag := $(quote tag))) + #[Block.ul ($(mkIdent arr1) ++ $(mkIdent arr2))]) + let v2 ← Term.elabTerm stx (some blockType) + Term.synthesizeSyntheticMVarsNoPostponing + let v2 ← instantiateMVars v2 + + addAndCompile <| .defnDecl { + name := blockName, levelParams := [], type := blockType, value := v2, hints := .opaque, safety := .safe + } + + if ((← getEnv).find? blockName).isSome then + return mkIdent blockName + else + throwError "Failed to construct monotonicity lemma table" + section delabhelpers /-! -To format the monotonicy lemma patterns, I’d like to clearly mark the monotone arguments from +To format the monotonicity lemma patterns, I’d like to clearly mark the monotone arguments from the other arguments. So I define two gadgets with custom delaborators. -/ @@ -105,7 +152,6 @@ def monotonicityLemmas : BlockRoleExpander ``(Inline.other (Verso.Genre.Manual.InlineLean.Inline.lean $(quote hlCall)) #[(Inline.code $(quote fmt.pretty))]) pure #[nameStx, patternStx] - let tableStx ← mkInlineTable rows (tag := "--monotonicity-lemma-table") let extraCss ← `(Block.other {Block.CSS with data := $(quote css)} #[]) return #[extraCss, tableStx] diff --git a/Manual/Meta/ParserAlias.lean b/Manual/Meta/ParserAlias.lean index 6a495bad..2c8f83ac 100644 --- a/Manual/Meta/ParserAlias.lean +++ b/Manual/Meta/ParserAlias.lean @@ -128,7 +128,7 @@ def parserAlias.descr : BlockDescr where if autoGroupArgs then some {{"Automatically wraps arguments in a " "null" " node unless there's exactly one"}} else none - let meta := + let metadata := match grp with | none => {{

{{arity}}

}} | some g => {{
  • {{arity}}
  • {{g}}
}} @@ -139,7 +139,7 @@ def parserAlias.descr : BlockDescr where "parser alias"
{{← (Highlighted.seq #[x, args]).toHtml}}
- {{meta}} + {{metadata}} {{← contents.mapM goB}}
diff --git a/Manual/Meta/Syntax.lean b/Manual/Meta/Syntax.lean index 6aa5c60f..8adc49ab 100644 --- a/Manual/Meta/Syntax.lean +++ b/Manual/Meta/Syntax.lean @@ -209,7 +209,7 @@ window.addEventListener("load", () => { }); "# ] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] @[role_expander keyword] @@ -915,7 +915,7 @@ def withOpenedNamespaces (nss : List Name) (act : DocElabM α) : DocElabM α := inductive SearchableTag where - | meta + | metavar | keyword | literalIdent | ws @@ -924,13 +924,13 @@ deriving DecidableEq, Ord, Repr open Lean.Syntax in instance : Quote SearchableTag where quote - | .meta => mkCApp ``SearchableTag.meta #[] + | .metavar => mkCApp ``SearchableTag.metavar #[] | .keyword => mkCApp ``SearchableTag.keyword #[] | .literalIdent => mkCApp ``SearchableTag.literalIdent #[] | .ws => mkCApp ``SearchableTag.ws #[] def SearchableTag.toKey : SearchableTag → String - | .meta => "meta" + | .metavar => "meta" | .keyword => "keyword" | .literalIdent => "literalIdent" | .ws => "ws" @@ -941,7 +941,7 @@ instance : ToJson SearchableTag where toJson := SearchableTag.toJson def SearchableTag.fromJson? 
: Json → Except String SearchableTag - | .str "meta" => pure .meta + | .str "meta" => pure .metavar | .str "keyword" => pure .keyword | .str "literalIdent" => pure .literalIdent | .str "ws" => pure .ws @@ -967,7 +967,7 @@ def searchableJson (ss : Array (SearchableTag × String)) : Json := partial def searchable (cat : Name) (txt : TaggedText GrammarTag) : Array (SearchableTag × String) := (go txt *> get).run' #[] |> fixup where - dots : SearchableTag × String := (.meta, "…") + dots : SearchableTag × String := (.metavar, "…") go : TaggedText GrammarTag → StateM (Array (SearchableTag × String)) String | .text s => do ws s @@ -1007,9 +1007,9 @@ where if st.isEmpty then return st -- Don't parenthesize just "..." | ")" | ")?" | ")*" => - if let some st' := suffixMatches #[(· == (.meta, "(")) , (· == dots)] st then return st'.push dots + if let some st' := suffixMatches #[(· == (.metavar, "(")) , (· == dots)] st then return st'.push dots | _ => pure () - return st.push (.meta, s) + return st.push (.metavar, s) pure s | .tag other txt => do go txt @@ -1019,7 +1019,7 @@ where | `command => Id.run do -- Drop leading ellipses from commands for h : i in [0:s.size] do - if s[i] ∉ [dots, (.meta, "?"), (.ws, " ")] then return s.extract i s.size + if s[i] ∉ [dots, (.metavar, "?"), (.ws, " ")] then return s.extract i s.size return s | _ => s ws (s : String) : StateM (Array (SearchableTag × String)) Unit := do @@ -1051,7 +1051,7 @@ where -- Don't push ellipsis onto ellipsis if let some _ := suffixMatches #[(· == dots)] st then st -- Don't alternate ellipses - else if let some st' := suffixMatches #[(· == dots), (· == (.meta, "|"))] st then st'.push dots + else if let some st' := suffixMatches #[(· == dots), (· == (.metavar, "|"))] st then st'.push dots else st.push dots @@ -1061,19 +1061,19 @@ where /-- info: some #[(Manual.SearchableTag.keyword, "aaa")] -/ #guard_msgs in -#eval searchable.suffixMatches #[(· == (.meta, "(")), (· == searchable.dots)] #[(.keyword, "aaa"),(.meta, "("), 
(.ws, " "),(.meta, "…")] +#eval searchable.suffixMatches #[(· == (.metavar, "(")), (· == searchable.dots)] #[(.keyword, "aaa"),(.metavar, "("), (.ws, " "),(.metavar, "…")] /-- info: some #[(Manual.SearchableTag.keyword, "aaa")] -/ #guard_msgs in -#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.keyword, "aaa"),(.meta, "…"), (.ws, " ")] +#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.keyword, "aaa"),(.metavar, "…"), (.ws, " ")] /-- info: some #[] -/ #guard_msgs in -#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.meta, "…"), (.ws, " ")] +#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.metavar, "…"), (.ws, " ")] /-- info: some #[] -/ #guard_msgs in -#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.meta, "…")] +#eval searchable.suffixMatches #[(· == searchable.dots)] #[(.metavar, "…")] open Manual.Meta.PPrint Grammar in /-- @@ -1440,7 +1440,7 @@ partial def grammar.descr : BlockDescr where pure .empty extraCss := [grammarCss, "#toc .split-toc > ol .syntax .keyword { font-family: var(--verso-code-font-family); font-weight: 600; }"] extraJs := [highlightingJs, grammarJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] localContentItem _ json _ := open Verso.Output.Html in do if let .arr #[_, .arr #[_, .arr toks]] := json then @@ -1536,7 +1536,7 @@ def syntaxKind.inlinedescr : InlineDescr where pure <| .seq #[← go b, .raw "\n"] extraCss := [grammarCss] extraJs := [highlightingJs, grammarJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in diff --git a/Manual/Meta/Tactics.lean b/Manual/Meta/Tactics.lean index 
275096db..2e04abb6 100644 --- a/Manual/Meta/Tactics.lean +++ b/Manual/Meta/Tactics.lean @@ -594,7 +594,7 @@ def proofState.descr : BlockDescr where toTeX := none extraCss := [highlightingStyle, proofStateStyle] extraJs := [highlightingJs] - extraJsFiles := [("popper.js", popper), ("tippy.js", tippy)] + extraJsFiles := [{filename := "popper.js", contents := popper}, {filename := "tippy.js", contents := tippy}] extraCssFiles := [("tippy-border.css", tippy.border.css)] toHtml := open Verso.Output.Html in diff --git a/Manual/ModuleSystem.lean b/Manual/ModuleSystem.lean new file mode 100644 index 00000000..0655c0bb --- /dev/null +++ b/Manual/ModuleSystem.lean @@ -0,0 +1,164 @@ +/- +Copyright (c) 2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author: Sebastian Ullrich +-/ + +import VersoManual +import Manual.Meta + +open Verso.Genre Manual +open Verso.Genre.Manual.InlineLean + +#doc (Manual) "The Module System" => +%%% +number := false +%%% + +The module system is an experimental feature that allows for more fine-grained control over what information is exported from, and imported into, Lean files. + +The main benefits of doing so are: + +: Average build times + + Changes to files that affect only non-exported information (e.g. proofs) will not trigger rebuilds outside of these files. + Even when dependent files have to be rebuilt, those files that cannot be affected according to the `import` annotations can be skipped. + +: API evolution + + Library authors can trust that changes to non-exported information will not affect downstream users of their library. + +: Avoiding accidental unfolding + + Limiting the scope in which definitions can be unfolded allows for avoiding both reductions that should be replaced by application of more specific theorems as well as unproductive reductions that were not in fact necessary. 
+ +: Smaller executables + + Separating compile-time and run-time code allows for more aggressive dead code elimination. + +The module system is activated by prefixing a Lean file with the `module` keyword. +`module`s can only import other `module`s so adoption has to be done top-down. +Non-`module`s can import `module`s and will ignore all module-system-specific annotations. + +At the time of writing, the module system is considered experimental and additionally guarded by the `experimental.module` option that has to be set to `true` in the project's Lake configuration file. +Of libraries shipped with Lean, only `Init` is currently fully ported. +The language semantics described below are close to final, but not all benefits described above are implemented yet. + +# Visibility + +The main distinction the module system introduces is between the {deftech}_public scope_ that contains all information visible to other modules via `import` and the {deftech}_private scope_ that is not imported by default. +Both declarations and imports themselves are scoped in this way. + +The default scope is private. +The new modifier `public` before a declaration or import puts it into the public scope instead. +No information from the private scope can be used in the public scope to ensure information in the latter still makes sense when only it is imported into other modules. + +:::TODO +These examples should be output-checked and elaborated with `-Dexperimental.module=true` +::: + +``` +module + +def priv : Nat := 0 + +public abbrev pub : Nat := priv -- error: unknown identifier `priv` +``` + +`public section` can be used to switch the default scope for declarations, with `private` locally negating it. +This is mostly intended to ease porting while avoiding merge conflicts. + +Marking a declaration as public at minimum makes its "signature", i.e. its name and type, visible. 
+Some specific declarations/terms may still put other parts in the private scope as follows:
+* `by` used in the public scope to prove a proposition puts the resulting proof in the private scope (by wrapping it in a public helper theorem).
+* `def` puts its body in the private scope by default. The defined constant cannot be unfolded when used in the public scope.
+This can be changed with the `@[expose]` attribute.
+`@[expose] section` can be used to apply the attribute to all `def`s in the section and can locally be negated by `@[no_expose]`.
+* `theorem` and `opaque` never expose their body.
+Consider using `@[expose] def` instead if exposition is absolutely necessary.
+* `abbrev` and `instance` always expose their body.
+For `instance`, individual field values can be marked `private`, which can be useful for programming purposes.
+For proof fields, `by` already does the trick.
+```
+module
+
+def myInternalHelper (x : Nat) : String := ...
+
+public instance : ToString Nat where
+  toString x := private myInternalHelper x
+```
+
+## Import Visibility
+
+The basic form `public import M` makes the public scope of `M` available in the public scope of the current module. The private scope of `M` is discarded.
+Without `public`, the public scope of `M` is instead imported into the {tech}[private scope].
+The import thus is irrelevant to downstream modules and ignored by them.
+
+`import all M` behaves like `import M` but additionally imports the private scope of `M` into the private scope of the current module.
+This is only allowed if `M` and the current module have the same module name root, as its main purpose is to allow for separating definitions and proofs into separate modules for internal organization of a library.
+``` +-- Module M.Defs +module + +public def f : Nat := 0 +``` +``` +-- Module M.Lemmas +module + +import all M.Defs + +public theorem f_eq_zero : f = 0 := + -- may access body of `f` imported into the private scope + rfl +``` +Note that the imported private scope includes private `import`s of `M`, including nested `import all`s that then are interpreted likewise. +That is, the set of private scopes accessible to the current module is the transitive closure of `import all` declarations. + +The module system's `import all` is more powerful than `import` without the module system. +It makes imported private definitions accessible directly by name, as if they were defined in the current module. +Thus another use case of `import all` is to make declarations available that need to be used in multiple modules but should not leak outside the current library. + +`public import all M` behaves like `public import M` followed by `import all M`, i.e. the `all` modifier becomes irrelevant for downstream modules. + +# The `meta` Phase + +When it comes to actual code execution, there is no point to a definition without a body. +Thus, in order to eagerly know what definitions _might_ be executed at compile time and so need to be available including their bodies (in some executable shape), any definition used as an entry point to compile-time execution has to be tagged with the new `meta` modifier. +This is automatically done in built-in metaprogramming syntax such as `syntax`, `macro`, and `elab` but may need to be done explicitly when manually applying metaprogramming attributes such as `@[app_delab]`. + +A `meta` definition may access (and thus invoke) any `meta` or non-`meta` definition of the current module. +For accessing imported definitions, the definition must either have been marked as `meta` when it was declared or the import must be marked as such (`meta import` when the accessing definition is in the private scope and `public meta import` otherwise). 
+
+```
+module
+
+meta import Std.Data.HashMap
+
+local elab "my_elab" : command => do
+  let m : Std.HashMap := {}
+  ...
+```
+
+# Common Errors and Patterns
+
+The following list contains common errors one might encounter when using the module system and especially porting existing files to the module system.
+
+: Unknown constant
+
+  Check whether you might be trying to access a private definition in the public scope.
+  If so, you might want to make the current declaration private as well or otherwise enter the private scope such as through `private` on a field or `by` for a proof.
+  TODO: improve error message.
+
+  If the message is prefixed with `(interpreter)`, this suggests a missing `meta import`.
+  The new import should be placed in the file defining the metaprogram depending on the missing constant, which is not necessarily the file triggering the error.
+  Note that the language server always does `meta import`s for the benefit of `#eval` etc., so the error might only occur in a cmdline build.
+  TODO: better, static `meta` checking.
+
+: Definitional equality errors, especially after porting
+
+  You are likely missing an {attr}`expose` attribute on a definition or alternatively, if imported, an `import all`.
+  Prefer the former if anyone outside your library might feasibly require the same access.
+  {keywordOf Lean.reduceCmd}`#reduce` and/or {option}`trace.Meta.isDefEq` can help with finding the blocking definition.
+  You might also see this as a kernel error when a tactic directly emits proof terms referencing specific declarations without going through the elaborator, such as for proof by reflection.
+  In this case, there is no readily available trace for debugging; consider using `@[expose] section`s generously on the closure of relevant modules.
\ No newline at end of file diff --git a/Manual/Monads.lean b/Manual/Monads.lean index 7b4b8195..ac195f28 100644 --- a/Manual/Monads.lean +++ b/Manual/Monads.lean @@ -81,7 +81,7 @@ def LenList.head (xs : LenList (n + 1) α) : α := xs.list.head <| by intro h cases xs - simp_all [List.length] + simp_all subst_eqs def LenList.tail (xs : LenList (n + 1) α) : LenList n α := @@ -120,7 +120,8 @@ The well-behaved {name}`Monad` instance takes the diagonal of the results of app ```lean @[simp] -theorem LenList.list_length_eq (xs : LenList n α) : xs.list.length = n := by +theorem LenList.list_length_eq (xs : LenList n α) : + xs.list.length = n := by cases xs simp [*] diff --git a/Manual/Monads/Lift.lean b/Manual/Monads/Lift.lean index b7cca446..5e1f44b3 100644 --- a/Manual/Monads/Lift.lean +++ b/Manual/Monads/Lift.lean @@ -125,16 +125,16 @@ fun {α} act => liftM act : {α : Type} → BaseIO α → EIO IO.Error α ::::example "Lifting Transformed Monads" There are also instances of {name}`MonadLift` for most of the standard library's {tech}[monad transformers], so base monad actions can be used in transformed monads without additional work. For example, state monad actions can be lifted across reader and exception transformers, allowing compatible monads to be intermixed freely: -````lean (keep := false) +```lean (keep := false) def incrBy (n : Nat) : StateM Nat Unit := modify (· + n) def incrOrFail : ReaderT Nat (ExceptT String (StateM Nat)) Unit := do if (← read) > 5 then throw "Too much!" incrBy (← read) -```` +``` Disabling lifting causes an error: -````lean (name := noLift) (error := true) +```lean (name := noLift) (error := true) set_option autoLift false def incrBy (n : Nat) : StateM Nat Unit := modify (. + n) @@ -142,7 +142,7 @@ def incrBy (n : Nat) : StateM Nat Unit := modify (. + n) def incrOrFail : ReaderT Nat (ExceptT String (StateM Nat)) Unit := do if (← read) > 5 then throw "Too much!" 
incrBy (← read) -```` +``` ```leanOutput noLift type mismatch incrBy __do_lift✝ @@ -212,7 +212,8 @@ def getByte (n : Nat) : Except String UInt8 := pure n.toUInt8 else throw s!"Out of range: {n}" -def getBytes (input : Array Nat) : StateT (Array UInt8) (Except String) Unit := do +def getBytes (input : Array Nat) : + StateT (Array UInt8) (Except String) Unit := do input.forM fun i => liftM (Except.tryCatch (some <$> getByte i) fun _ => pure none) >>= fun @@ -233,7 +234,10 @@ Ideally, state updates would be performed within the {name}`tryCatch` call direc Attempting to save bytes and handled exceptions does not work, however, because the arguments to {name}`Except.tryCatch` have type {lean}`Except String Unit`: ```lean (error := true) (name := getBytesErr) (keep := false) -def getBytes' (input : Array Nat) : StateT (Array String) (StateT (Array UInt8) (Except String)) Unit := do +def getBytes' (input : Array Nat) : + StateT (Array String) + (StateT (Array UInt8) + (Except String)) Unit := do input.forM fun i => liftM (Except.tryCatch @@ -246,7 +250,7 @@ def getBytes' (input : Array Nat) : StateT (Array String) (StateT (Array UInt8) failed to synthesize MonadStateOf (Array String) (Except String) -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Because {name}`StateT` has a {name}`MonadControl` instance, {name}`control` can be used instead of {name}`liftM`. @@ -254,7 +258,10 @@ It provides the inner action with an interpreter for the outer monad. In the case of {name}`StateT`, this interpreter expects that the inner monad returns a tuple that includes the updated state, and takes care of providing the initial state and extracting the updated state from the tuple. 
```lean -def getBytes' (input : Array Nat) : StateT (Array String) (StateT (Array UInt8) (Except String)) Unit := do +def getBytes' (input : Array Nat) : + StateT (Array String) + (StateT (Array UInt8) + (Except String)) Unit := do input.forM fun i => control fun run => (Except.tryCatch diff --git a/Manual/Monads/Syntax.lean b/Manual/Monads/Syntax.lean index 6d6a2a64..54565770 100644 --- a/Manual/Monads/Syntax.lean +++ b/Manual/Monads/Syntax.lean @@ -563,7 +563,9 @@ When iterating over multiple collections, iteration stops when any of the collec When iterating over the valid indices for an array with {keywordOf Lean.Parser.Term.doFor}`for`, naming the membership proof allows the tactic that searches for proofs that array indices are in bounds to succeed. ```lean (keep := false) -def satisfyingIndices (p : α → Prop) [DecidablePred p] (xs : Array α) : Array Nat := Id.run do +def satisfyingIndices + (p : α → Prop) [DecidablePred p] + (xs : Array α) : Array Nat := Id.run do let mut out := #[] for h : i in [0:xs.size] do if p xs[i] then out := out.push i diff --git a/Manual/Monads/Zoo.lean b/Manual/Monads/Zoo.lean index 7566db06..0c4cb646 100644 --- a/Manual/Monads/Zoo.lean +++ b/Manual/Monads/Zoo.lean @@ -263,7 +263,7 @@ Only the outermost may be used, because the type of the state is an output param failed to synthesize MonadState String M -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` Providing the state type explicitly using {name}`getThe` from {name}`MonadStateOf` allows both states to be read. diff --git a/Manual/Namespaces.lean b/Manual/Namespaces.lean index b1ca3a98..4612d9c3 100644 --- a/Manual/Namespaces.lean +++ b/Manual/Namespaces.lean @@ -144,7 +144,10 @@ The section's name must be used to close it. 
end ``` ```leanOutput english4 -invalid 'end', name is missing (expected Greetings) +Missing name after `end`: Expected the current scope name `Greetings` + +Hint: To end the current scope `Greetings`, specify its name: + end ̲G̲r̲e̲e̲t̲i̲n̲g̲s̲ ``` ```lean @@ -216,7 +219,7 @@ An {keywordOf Lean.Parser.Command.end}`end` command cannot close all three due t end A.D.E ``` ```leanOutput endADE -invalid 'end', name mismatch (expected «».D.E) +Invalid name after `end`: Expected `D.E`, but found `A.D.E` ``` Instead, namespaces and sections must be ended separately. ```lean @@ -353,7 +356,8 @@ automatically included section variable(s) unused in theorem 'p_all': pFifteen consider restructuring your `variable` declarations so that the variables are not in scope or explicitly omit them: omit pFifteen in theorem ... -note: this linter can be disabled with `set_option linter.unusedSectionVars false` + +Note: This linter can be disabled with `set_option linter.unusedSectionVars false` ``` This can be avoided by using {keywordOf Lean.Parser.Command.omit}`omit`to remove {lean}`pFifteen`: diff --git a/Manual/NotationsMacros.lean b/Manual/NotationsMacros.lean index a7598813..4477a0da 100644 --- a/Manual/NotationsMacros.lean +++ b/Manual/NotationsMacros.lean @@ -918,9 +918,9 @@ some 4 :::example "Scoped Macros" Scoped macro rules are active only in their namespace. When the namespace `ConfusingNumbers` is open, numeric literals will be assigned an incorrect meaning. -````lean +```lean namespace ConfusingNumbers -```` +``` The following macro recognizes terms that are odd numeric literals, and replaces them with double their value. If it unconditionally replaced them with double their value, then macro expansion would become an infinite loop because the same rule would always match the output. @@ -934,9 +934,9 @@ scoped macro_rules ``` Once the namespace ends, the macro is no longer used. 
-````lean +```lean end ConfusingNumbers -```` +``` Without opening the namespace, numeric literals function in the usual way. ```lean (name := nums1) diff --git a/Manual/NotationsMacros/Delab.lean b/Manual/NotationsMacros/Delab.lean index 0fb22f75..149db9e1 100644 --- a/Manual/NotationsMacros/Delab.lean +++ b/Manual/NotationsMacros/Delab.lean @@ -121,7 +121,7 @@ v : Solo ``` This proof state shows the constructor using {tech}[structure instance] syntax. An unexpander can be used to override this choice. -Because {name}`Solo.mk` cannot be applied to any arguments, the unexpander is free to ignore the syntax, which will always be {lean (type := "UnexpandM Syntax")}``` `(Solo.mk) ```. +Because {name}`Solo.mk` cannot be applied to any arguments, the unexpander is free to ignore the syntax, which will always be {lean (type := "UnexpandM Syntax")}`` `(Solo.mk) ``. ```lean @[app_unexpander Solo.mk] diff --git a/Manual/NotationsMacros/Elab.lean b/Manual/NotationsMacros/Elab.lean index 334d0353..b9b788dc 100644 --- a/Manual/NotationsMacros/Elab.lean +++ b/Manual/NotationsMacros/Elab.lean @@ -261,7 +261,7 @@ numerals are polymorphic in Lean, but the numeral `5` cannot be used in a contex Int → Int due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` ::: diff --git a/Manual/NotationsMacros/Operators.lean b/Manual/NotationsMacros/Operators.lean index 50ae19df..9159f7fb 100644 --- a/Manual/NotationsMacros/Operators.lean +++ b/Manual/NotationsMacros/Operators.lean @@ -196,7 +196,7 @@ However, because the new operator is not associative, the {tech}[local longest-m failed to synthesize HAdd Prop Prop ?m.38 -Additional diagnostic information may be available using the `set_option diagnostics true` command. 
+Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` ::: @@ -223,7 +223,7 @@ numerals are polymorphic in Lean, but the numeral `2` cannot be used in a contex Prop due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` The new operator is not associative, so the {tech}[local longest-match rule] means that only {name}`HAdd.hAdd` applies to the three-argument version: @@ -234,7 +234,7 @@ The new operator is not associative, so the {tech}[local longest-match rule] mea failed to synthesize HAdd Prop Prop ?m.20 -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` ::: diff --git a/Manual/RecursiveDefs.lean b/Manual/RecursiveDefs.lean index bc248bbf..79309f0d 100644 --- a/Manual/RecursiveDefs.lean +++ b/Manual/RecursiveDefs.lean @@ -221,7 +221,7 @@ The recursive function {name}`nextPrime` inefficiently computes the next prime n Because there are infinitely many prime numbers, it always terminates; however, formulating this proof would be nontrivial. It is thus marked {keyword}`partial`. 
-````lean +```lean def isPrime (n : Nat) : Bool := Id.run do for i in [2:n] do if i * i > n then return true @@ -231,7 +231,7 @@ def isPrime (n : Nat) : Bool := Id.run do partial def nextPrime (n : Nat) : Nat := let n := n + 1 if isPrime n then n else nextPrime n -```` +``` It is nonetheless possible to prove that the following two functions are equal: ```lean @@ -440,7 +440,7 @@ However, {lean}`Clause` is semireducible, so the {inst}`ToString String` instanc failed to synthesize ToString Clause -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. ``` The instance can be explicitly enabled by creating a {lean}`ToString Clause` instance that reduces to the {lean}`ToString String` instance. @@ -471,7 +471,7 @@ attribute [irreducible] Sequence #check let xs : Sequence Nat := .ofList [1,2,3]; xs.reverse ``` ```leanOutput irredSeq -invalid field 'reverse', the environment does not contain 'Sequence.reverse' +Invalid field `reverse`: The environment does not contain `Sequence.reverse` xs has type Sequence Nat diff --git a/Manual/RecursiveDefs/PartialFixpoint.lean b/Manual/RecursiveDefs/PartialFixpoint.lean index 60f7f02f..bb89745d 100644 --- a/Manual/RecursiveDefs/PartialFixpoint.lean +++ b/Manual/RecursiveDefs/PartialFixpoint.lean @@ -175,8 +175,10 @@ The error message on the recursive call is: ```leanOutput nonTailPos Could not prove 'List.findIndex' to be monotone in its recursive calls: Cannot eliminate recursive call `List.findIndex ys p` enclosed in - have r := ys✝.findIndex p; - if r = -1 then -1 else r + 1 + if ys✝.findIndex p = -1 then -1 else ys✝.findIndex p + 1 + Tried to apply 'monotone_ite', but failed. + Possible cause: A missing `MonoBind` instance. + Use `set_option trace.Elab.Tactic.monotonicity true` to debug. 
``` ::: diff --git a/Manual/RecursiveDefs/Structural.lean b/Manual/RecursiveDefs/Structural.lean index e1b8308b..00437f87 100644 --- a/Manual/RecursiveDefs/Structural.lean +++ b/Manual/RecursiveDefs/Structural.lean @@ -17,6 +17,8 @@ open Verso.Genre.Manual.InlineLean open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode +set_option guard_msgs.diff true + #doc (Manual) "Structural Recursion" => %%% tag := "structural-recursion" @@ -157,7 +159,8 @@ The decreasing parameter's type must be an inductive type. In {lean}`notInductive`, a function is specified as the decreasing parameter: ```lean (error := true) (name := badnoindct) -def notInductive (x : Nat → Nat) : Nat := notInductive (fun n => x (n+1)) +def notInductive (x : Nat → Nat) : Nat := + notInductive (fun n => x (n+1)) termination_by structural x ``` ```leanOutput badnoindct @@ -573,7 +576,7 @@ set_option pp.all true in trace: [Elab.definition.body] half : Nat → Nat := fun (x : Nat) => half.match_1.{1} (fun (x : Nat) => Nat) x (fun (_ : Unit) => Nat.zero) (fun (_ : Unit) => Nat.zero) - fun (n : Nat) => Nat.succ (half n) + fun (n : Nat) => Nat.succ (_root_.half n) -/ #guard_msgs in def half : Nat → Nat diff --git a/Manual/RecursiveDefs/Structural/CourseOfValuesExample.lean b/Manual/RecursiveDefs/Structural/CourseOfValuesExample.lean index d3583611..d7051e36 100644 --- a/Manual/RecursiveDefs/Structural/CourseOfValuesExample.lean +++ b/Manual/RecursiveDefs/Structural/CourseOfValuesExample.lean @@ -21,7 +21,7 @@ open Lean.Elab.Tactic.GuardMsgs.WhitespaceMode This definition is equivalent to {name}`List.below`: ```lean def List.below' {α : Type u} {motive : List α → Sort u} : - List α → Sort (max 1 u) + List α → Sort (max (u + 1) u) | [] => PUnit | _ :: xs => motive xs ×' xs.below' (motive := motive) ``` @@ -29,7 +29,7 @@ def List.below' {α : Type u} {motive : List α → Sort u} : ```lean (show := false) theorem List.below_eq_below' : @List.below = @List.below' := by funext α motive xs - induction xs <;> simp [List.below, 
below'] + induction xs <;> simp [below'] congr ``` @@ -46,7 +46,7 @@ inductive Tree (α : Type u) : Type u where Its corresponding course-of-values table contains the realizations of the motive for all subtrees: ```lean def Tree.below' {α : Type u} {motive : Tree α → Sort u} : - Tree α → Sort (max 1 u) + Tree α → Sort (max (u + 1) u) | .leaf => PUnit | .branch left _val right => (motive left ×' left.below' (motive := motive)) ×' @@ -58,9 +58,9 @@ theorem Tree.below_eq_below' : @Tree.below = @Tree.below' := by funext α motive t induction t next => - simp [Tree.below, Tree.below'] + simp [Tree.below'] next ihl ihr => - simp [Tree.below, Tree.below', ihl, ihr] + simp [Tree.below', ihl, ihr] ``` diff --git a/Manual/RecursiveDefs/WF.lean b/Manual/RecursiveDefs/WF.lean index 9251c39d..69c8d41c 100644 --- a/Manual/RecursiveDefs/WF.lean +++ b/Manual/RecursiveDefs/WF.lean @@ -73,7 +73,7 @@ tag := "wf-rel" A relation `≺` is a {deftech}_well-founded relation_ if there exists no infinitely descending chain -$$`` x_0 ≻ x_1 ≻ \cdots`` +$$` x_0 ≻ x_1 ≻ \cdots` In Lean, types that are equipped with a canonical well-founded relation are instances of the {name}`WellFoundedRelation` type class. 
diff --git a/Manual/RecursiveDefs/WF/PreprocessExample.lean b/Manual/RecursiveDefs/WF/PreprocessExample.lean index 5969a305..0deac9c8 100644 --- a/Manual/RecursiveDefs/WF/PreprocessExample.lean +++ b/Manual/RecursiveDefs/WF/PreprocessExample.lean @@ -176,7 +176,9 @@ macro "sizeOf_pair_dec" : tactic => omega done) -macro_rules | `(tactic| decreasing_trivial) => `(tactic| sizeOf_pair_dec) +macro_rules + | `(tactic| decreasing_trivial) => + `(tactic| sizeOf_pair_dec) def Tree.map (f : α → β) : Tree α → Tree β | leaf x => leaf (f x) diff --git a/Manual/Releases.lean b/Manual/Releases.lean index 46126070..4963a199 100644 --- a/Manual/Releases.lean +++ b/Manual/Releases.lean @@ -6,34 +6,35 @@ Author: Joachim Breitner import VersoManual -import Manual.Releases.«v4.21.0» -import Manual.Releases.«v4.20.1» -import Manual.Releases.«v4.20.0» -import Manual.Releases.«v4.19.0» -import Manual.Releases.«v4.18.0» -import Manual.Releases.«v4.17.0» -import Manual.Releases.«v4.16.0» -import Manual.Releases.«v4.15.0» -import Manual.Releases.«v4.14.0» -import Manual.Releases.«v4.13.0» -import Manual.Releases.«v4.12.0» -import Manual.Releases.«v4.11.0» -import Manual.Releases.«v4.10.0» -import Manual.Releases.«v4.9.0» -import Manual.Releases.«v4.8.0» -import Manual.Releases.«v4.7.0» -import Manual.Releases.«v4.6.0» -import Manual.Releases.«v4.5.0» -import Manual.Releases.«v4.4.0» -import Manual.Releases.«v4.3.0» -import Manual.Releases.«v4.2.0» -import Manual.Releases.«v4.1.0» -import Manual.Releases.«v4.0.0» -import Manual.Releases.«v4.0.0-m5» -import Manual.Releases.«v4.0.0-m4» -import Manual.Releases.«v4.0.0-m3» -import Manual.Releases.«v4.0.0-m2» -import Manual.Releases.«v4.0.0-m1» +import Manual.Releases.«v4_22_0» +import Manual.Releases.«v4_21_0» +import Manual.Releases.«v4_20_1» +import Manual.Releases.«v4_20_0» +import Manual.Releases.«v4_19_0» +import Manual.Releases.«v4_18_0» +import Manual.Releases.«v4_17_0» +import Manual.Releases.«v4_16_0» +import Manual.Releases.«v4_15_0» 
+import Manual.Releases.«v4_14_0» +import Manual.Releases.«v4_13_0» +import Manual.Releases.«v4_12_0» +import Manual.Releases.«v4_11_0» +import Manual.Releases.«v4_10_0» +import Manual.Releases.«v4_9_0» +import Manual.Releases.«v4_8_0» +import Manual.Releases.«v4_7_0» +import Manual.Releases.«v4_6_0» +import Manual.Releases.«v4_5_0» +import Manual.Releases.«v4_4_0» +import Manual.Releases.«v4_3_0» +import Manual.Releases.«v4_2_0» +import Manual.Releases.«v4_1_0» +import Manual.Releases.«v4_0_0» +import Manual.Releases.«v4_0_0-m5» +import Manual.Releases.«v4_0_0-m4» +import Manual.Releases.«v4_0_0-m3» +import Manual.Releases.«v4_0_0-m2» +import Manual.Releases.«v4_0_0-m1» open Manual open Verso.Genre @@ -49,58 +50,60 @@ This section provides release notes about recent versions of Lean. When updating read the corresponding release notes. They may contain advice that will help you understand the differences with the previous version and upgrade your projects. -{include 0 Manual.Releases.«v4.21.0»} +{include 0 Manual.Releases.«v4_22_0»} -{include 0 Manual.Releases.«v4.20.1»} +{include 0 Manual.Releases.«v4_21_0»} -{include 0 Manual.Releases.«v4.20.0»} +{include 0 Manual.Releases.«v4_20_1»} -{include 0 Manual.Releases.«v4.19.0»} +{include 0 Manual.Releases.«v4_20_0»} -{include 0 Manual.Releases.«v4.18.0»} +{include 0 Manual.Releases.«v4_19_0»} -{include 0 Manual.Releases.«v4.17.0»} +{include 0 Manual.Releases.«v4_18_0»} -{include 0 Manual.Releases.«v4.16.0»} +{include 0 Manual.Releases.«v4_17_0»} -{include 0 Manual.Releases.«v4.15.0»} +{include 0 Manual.Releases.«v4_16_0»} -{include 0 Manual.Releases.«v4.14.0»} +{include 0 Manual.Releases.«v4_15_0»} -{include 0 Manual.Releases.«v4.13.0»} +{include 0 Manual.Releases.«v4_14_0»} -{include 0 Manual.Releases.«v4.12.0»} +{include 0 Manual.Releases.«v4_13_0»} -{include 0 Manual.Releases.«v4.11.0»} +{include 0 Manual.Releases.«v4_12_0»} -{include 0 Manual.Releases.«v4.10.0»} +{include 0 Manual.Releases.«v4_11_0»} -{include 0 
Manual.Releases.«v4.9.0»} +{include 0 Manual.Releases.«v4_10_0»} -{include 0 Manual.Releases.«v4.8.0»} +{include 0 Manual.Releases.«v4_9_0»} -{include 0 Manual.Releases.«v4.7.0»} +{include 0 Manual.Releases.«v4_8_0»} -{include 0 Manual.Releases.«v4.6.0»} +{include 0 Manual.Releases.«v4_7_0»} -{include 0 Manual.Releases.«v4.5.0»} +{include 0 Manual.Releases.«v4_6_0»} -{include 0 Manual.Releases.«v4.4.0»} +{include 0 Manual.Releases.«v4_5_0»} -{include 0 Manual.Releases.«v4.3.0»} +{include 0 Manual.Releases.«v4_4_0»} -{include 0 Manual.Releases.«v4.2.0»} +{include 0 Manual.Releases.«v4_3_0»} -{include 0 Manual.Releases.«v4.1.0»} +{include 0 Manual.Releases.«v4_2_0»} -{include 0 Manual.Releases.«v4.0.0»} +{include 0 Manual.Releases.«v4_1_0»} -{include 0 Manual.Releases.«v4.0.0-m5»} +{include 0 Manual.Releases.«v4_0_0»} -{include 0 Manual.Releases.«v4.0.0-m4»} +{include 0 Manual.Releases.«v4_0_0-m5»} -{include 0 Manual.Releases.«v4.0.0-m3»} +{include 0 Manual.Releases.«v4_0_0-m4»} -{include 0 Manual.Releases.«v4.0.0-m2»} +{include 0 Manual.Releases.«v4_0_0-m3»} -{include 0 Manual.Releases.«v4.0.0-m1»} +{include 0 Manual.Releases.«v4_0_0-m2»} + +{include 0 Manual.Releases.«v4_0_0-m1»} diff --git a/Manual/Releases/v4.0.0-m1.lean b/Manual/Releases/v4_0_0-m1.lean similarity index 98% rename from Manual/Releases/v4.0.0-m1.lean rename to Manual/Releases/v4_0_0-m1.lean index 73b0a391..79d136eb 100644 --- a/Manual/Releases/v4.0.0-m1.lean +++ b/Manual/Releases/v4_0_0-m1.lean @@ -18,7 +18,7 @@ tag := "release-v4.0.0-m1" file := "v4.0.0-m1" %%% -`````markdown +```markdown The Lean development team is proud to announce the first milestone release of Lean 4. This release is aimed at experimentation with the new features of Lean 4, eventually leading to a full release of 4.0.0 ready for general use. This release is the result of almost three years of work since the release of Lean 3.4.0, reworking, extending, and improving almost all aspects of Lean. 
More information about Lean 4 can be found in the [official documentation](https://leanprover.github.io/lean4/doc/) as well as in the introductory talk ["An overview of Lean 4"](https://www.youtube.com/watch?v=UeGvhfW1v9M) at Lean Together 2021. @@ -30,4 +30,4 @@ Leonardo de Moura & Sebastian Ullrich * Marc Huisinga and Wojciech Nawrocki - Lean Server * Joe Hendrix, Andrew Kent, Rob Dockins, Simon Winwood (Galois Inc) - early adopters, suggestions, feedback * Daan Leijen, Simon Peyton Jones, Nikhil Swamy, Sebastian Graf, Max Wagner - design discussions, feedback, suggestions -````` +``` diff --git a/Manual/Releases/v4.0.0-m2.lean b/Manual/Releases/v4_0_0-m2.lean similarity index 96% rename from Manual/Releases/v4.0.0-m2.lean rename to Manual/Releases/v4_0_0-m2.lean index 50ffd72d..7d5e6700 100644 --- a/Manual/Releases/v4.0.0-m2.lean +++ b/Manual/Releases/v4_0_0-m2.lean @@ -18,6 +18,6 @@ tag := "release-v4.0.0-m2" file := "v4.0.0-m2" %%% -`````markdown +```markdown This is the second milestone release of Lean 4. With too many improvements and bug fixes in almost all parts of the system to list, we would like to single out major improvements to `simp` and other built-in tactics as well as support for a goal view that make the proving experience more comfortable. -````` +``` diff --git a/Manual/Releases/v4.0.0-m3.lean b/Manual/Releases/v4_0_0-m3.lean similarity index 96% rename from Manual/Releases/v4.0.0-m3.lean rename to Manual/Releases/v4_0_0-m3.lean index 3d40441c..c417453e 100644 --- a/Manual/Releases/v4.0.0-m3.lean +++ b/Manual/Releases/v4_0_0-m3.lean @@ -11,6 +11,7 @@ import Manual.Meta.Markdown open Manual open Verso.Genre +set_option linter.verso.markup.codeBlock false #doc (Manual) "Lean 4.0.0-m3 (2022-01-31)" => %%% @@ -18,7 +19,7 @@ tag := "release-v4.0.0-m3" file := "v4.0.0-m3" %%% -`````markdown +````markdown This is the third milestone release of Lean 4, and the last planned milestone before an official release. 
With almost 3000 commits improving and extending many parts of the system since the last milestone, we are now close to completing all main features we have envisioned for Lean 4. @@ -73,4 +74,4 @@ $ git shortlog -s -n v4.0.0-m2..v4.0.0-m3 1 zygi 1 Бакиновский Максим ``` -````` +```` diff --git a/Manual/Releases/v4.0.0-m4.lean b/Manual/Releases/v4_0_0-m4.lean similarity index 99% rename from Manual/Releases/v4.0.0-m4.lean rename to Manual/Releases/v4_0_0-m4.lean index 715b385a..1ad97fe2 100644 --- a/Manual/Releases/v4.0.0-m4.lean +++ b/Manual/Releases/v4_0_0-m4.lean @@ -18,7 +18,7 @@ tag := "release-v4.0.0-m4" file := "v4.0.0-m4" %%% -`````markdown +````markdown This is the fourth milestone release of Lean 4. It contains many improvements and many new features. We had more than 600 commits since the last milestone. @@ -336,4 +336,4 @@ For example, given `f : Nat → Nat` and `g : Nat → Nat`, `f.comp g` is now no end Lean.Elab ``` -````` +```` diff --git a/Manual/Releases/v4.0.0-m5.lean b/Manual/Releases/v4_0_0-m5.lean similarity index 99% rename from Manual/Releases/v4.0.0-m5.lean rename to Manual/Releases/v4_0_0-m5.lean index 1e5402bf..88ab98b4 100644 --- a/Manual/Releases/v4.0.0-m5.lean +++ b/Manual/Releases/v4_0_0-m5.lean @@ -18,7 +18,7 @@ tag := "release-v4.0.0-m5" file := "v4.0.0-m5" %%% -`````markdown +````markdown This is the fifth milestone release of Lean 4. It contains many improvements and many new features. We had 1495 commits since the last milestone. @@ -775,4 +775,4 @@ For example, the goal * [Add tutorial-like examples to our documentation](https://github.com/leanprover/lean4/tree/master/doc/examples), rendered using LeanInk+Alectryon. 
-````` +```` diff --git a/Manual/Releases/v4.0.0.lean b/Manual/Releases/v4_0_0.lean similarity index 99% rename from Manual/Releases/v4.0.0.lean rename to Manual/Releases/v4_0_0.lean index 77c53dfc..f06a3795 100644 --- a/Manual/Releases/v4.0.0.lean +++ b/Manual/Releases/v4_0_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.0.0" file := "v4.0.0" %%% -`````markdown +````markdown * [`Lean.Meta.getConst?` has been renamed](https://github.com/leanprover/lean4/pull/2454). We have renamed `getConst?` to `getUnfoldableConst?` (and `getConstNoEx?` to `getUnfoldableConstNoEx?`). These were not intended to be part of the public API, but downstream projects had been using them @@ -137,4 +137,4 @@ file := "v4.0.0" * Many new doc strings have been added to declarations at `Init`. -````` +```` diff --git a/Manual/Releases/v4.10.0.lean b/Manual/Releases/v4_10_0.lean similarity index 99% rename from Manual/Releases/v4.10.0.lean rename to Manual/Releases/v4_10_0.lean index 9188e938..58063fa4 100644 --- a/Manual/Releases/v4.10.0.lean +++ b/Manual/Releases/v4_10_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.10.0" file := "v4.10.0" %%% -`````markdown +````markdown ### Language features, tactics, and metaprograms * `split` tactic: @@ -298,4 +298,4 @@ file := "v4.10.0" * Sometimes terms created via a sequence of unifications will be more eta reduced than before and proofs will require adaptation ([#4387](https://github.com/leanprover/lean4/pull/4387)). * The `GetElem` class has been split into two; see the docstrings for `GetElem` and `GetElem?` for more information ([#4560](https://github.com/leanprover/lean4/pull/4560)). 
-````` +```` diff --git a/Manual/Releases/v4.11.0.lean b/Manual/Releases/v4_11_0.lean similarity index 99% rename from Manual/Releases/v4.11.0.lean rename to Manual/Releases/v4_11_0.lean index 5a306492..ad2c636f 100644 --- a/Manual/Releases/v4.11.0.lean +++ b/Manual/Releases/v4_11_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.11.0" file := "v4.11.0" %%% -`````markdown +````markdown ### Language features, tactics, and metaprograms * The variable inclusion mechanism has been changed. Like before, when a definition mentions a variable, Lean will add it as an argument of the definition, but now in theorem bodies, variables are not included based on usage in order to ensure that changes to the proof cannot change the statement of the overall theorem. Instead, variables are only available to the proof if they have been mentioned in the theorem header or in an **`include` command** or are instance implicit and depend only on such variables. The **`omit` command** can be used to omit included variables. @@ -354,4 +354,4 @@ and related error handling. For porting, a new option `deprecated.oldSectionVars` is included to locally switch back to the old behavior. -````` +```` diff --git a/Manual/Releases/v4.12.0.lean b/Manual/Releases/v4_12_0.lean similarity index 99% rename from Manual/Releases/v4.12.0.lean rename to Manual/Releases/v4_12_0.lean index 5dd2dae4..c716be31 100644 --- a/Manual/Releases/v4.12.0.lean +++ b/Manual/Releases/v4_12_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.12.0" file := "v4.12.0" %%% -`````markdown +````markdown ### Language features, tactics, and metaprograms * `bv_decide` tactic. This release introduces a new tactic for proving goals involving `BitVec` and `Bool`. It reduces the goal to a SAT instance that is refuted by an external solver, and the resulting LRAT proof is checked in Lean. This is used to synthesize a proof of the goal by reflection. 
As this process uses verified algorithms, proofs generated by this tactic use `Lean.ofReduceBool`, so this tactic includes the Lean compiler as part of the trusted code base. The external solver CaDiCaL is included with Lean and does not need to be installed separately to make use of `bv_decide`. @@ -329,4 +329,4 @@ file := "v4.12.0" * query functions use the term `get` instead of `find`, ([#4943](https://github.com/leanprover/lean4/pull/4943)) * the notation `map[key]` no longer returns an optional value but instead expects a proof that the key is present in the map. The previous behavior is available via the `map[key]?` notation. -````` +```` diff --git a/Manual/Releases/v4.13.0.lean b/Manual/Releases/v4_13_0.lean similarity index 99% rename from Manual/Releases/v4.13.0.lean rename to Manual/Releases/v4_13_0.lean index 50f94e3c..09547897 100644 --- a/Manual/Releases/v4.13.0.lean +++ b/Manual/Releases/v4_13_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.13.0" file := "v4.13.0" %%% -`````markdown +```markdown **Full Changelog**: https://github.com/leanprover/lean4/compare/v4.12.0...v4.13.0 ### Language features, tactics, and metaprograms @@ -329,4 +329,4 @@ file := "v4.13.0" * [#5489](https://github.com/leanprover/lean4/pull/5489) commit `lake-manifest.json` when updating `lean-pr-testing` branches * [#5490](https://github.com/leanprover/lean4/pull/5490) use separate secrets for commenting and branching in `pr-release.yml` -````` +``` diff --git a/Manual/Releases/v4.14.0.lean b/Manual/Releases/v4_14_0.lean similarity index 99% rename from Manual/Releases/v4.14.0.lean rename to Manual/Releases/v4_14_0.lean index 5427bce3..833ff80f 100644 --- a/Manual/Releases/v4.14.0.lean +++ b/Manual/Releases/v4_14_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.14.0" file := "v4.14.0" %%% -`````markdown +````markdown **Full Changelog**: https://github.com/leanprover/lean4/compare/v4.13.0...v4.14.0 @@ -299,4 +299,4 @@ file := "v4.14.0" * The `--lean` CLI option for `lake` was removed. 
Use the `LEAN` environment variable instead. ([#5684](https://github.com/leanprover/lean4/pull/5684)) * The `inductive ... :=`, `structure ... :=`, and `class ... :=` syntaxes have been deprecated in favor of the `... where` variants. The old syntax produces a warning, controlled by the `linter.deprecated` option. ([#5542](https://github.com/leanprover/lean4/pull/5542)) * The generated tactic configuration elaborators now land in `TacticM` to make use of the current recovery state. Commands that wish to elaborate configurations should now use `declare_command_config_elab` instead of `declare_config_elab` to get an elaborator landing in `CommandElabM`. Syntaxes should migrate to `optConfig` instead of `(config)?`, but the elaborators are reverse compatible. ([#5883](https://github.com/leanprover/lean4/pull/5883)) -````` +```` diff --git a/Manual/Releases/v4.15.0.lean b/Manual/Releases/v4_15_0.lean similarity index 99% rename from Manual/Releases/v4.15.0.lean rename to Manual/Releases/v4_15_0.lean index 1739948d..7736ec44 100644 --- a/Manual/Releases/v4.15.0.lean +++ b/Manual/Releases/v4_15_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.15.0" file := "v4.15.0" %%% -`````markdown +````markdown ## Language @@ -662,4 +662,4 @@ profiler output. - [#6259](https://github.com/leanprover/lean4/pull/6259) ensures that nesting trace nodes are annotated with timing information iff `trace.profiler` is active. -````` +```` diff --git a/Manual/Releases/v4.16.0.lean b/Manual/Releases/v4_16_0.lean similarity index 99% rename from Manual/Releases/v4.16.0.lean rename to Manual/Releases/v4_16_0.lean index 6493c147..489ac823 100644 --- a/Manual/Releases/v4.16.0.lean +++ b/Manual/Releases/v4_16_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.16.0" file := "v4.16.0" %%% -`````markdown +````markdown ## Highlights ### Unique `sorry`s @@ -583,4 +583,4 @@ the universe parameter. 
* [#6363](https://github.com/leanprover/lean4/pull/6363) fixes errors at load time in the comparison mode of the Firefox profiler. -````` +```` diff --git a/Manual/Releases/v4.17.0.lean b/Manual/Releases/v4_17_0.lean similarity index 99% rename from Manual/Releases/v4.17.0.lean rename to Manual/Releases/v4_17_0.lean index 23c5ddee..f34b4008 100644 --- a/Manual/Releases/v4.17.0.lean +++ b/Manual/Releases/v4_17_0.lean @@ -11,6 +11,8 @@ import Manual.Meta.Markdown open Manual open Verso.Genre +-- TODO: investigate why the Markdown elaboration is taking this much stack in the new compiler +set_option maxRecDepth 9500 #doc (Manual) "Lean 4.17.0 (2025-03-03)" => %%% @@ -18,7 +20,7 @@ tag := "release-v4.17.0" file := "v4.17.0" %%% -`````markdown +````markdown For this release, 319 changes landed. In addition to the 168 feature additions and 57 fixes listed below there were 12 refactoring changes, 13 documentation improvements and 56 chores. @@ -1127,4 +1129,4 @@ new `changelog-*` labels and "..." conventions. * [#6542](https://github.com/leanprover/lean4/pull/6542) introduces a script that automates checking whether major downstream repositories have been updated for a new toolchain release. -````` +```` diff --git a/Manual/Releases/v4.18.0.lean b/Manual/Releases/v4_18_0.lean similarity index 99% rename from Manual/Releases/v4.18.0.lean rename to Manual/Releases/v4_18_0.lean index cae8ad28..5d383b95 100644 --- a/Manual/Releases/v4.18.0.lean +++ b/Manual/Releases/v4_18_0.lean @@ -11,6 +11,8 @@ import Manual.Meta.Markdown open Manual open Verso.Genre +-- TODO: investigate why this is needed in the new compiler +set_option maxRecDepth 9900 #doc (Manual) "Lean 4.18.0 (2025-04-02)" => %%% @@ -18,7 +20,7 @@ tag := "release-v4.18.0" file := "v4.18.0" %%% -`````markdown +````markdown For this release, 344 changes landed. 
In addition to the 166 feature additions and 38 fixes listed below there were 13 refactoring changes, 10 documentation improvements, 3 performance improvements, 4 improvements to the test suite and 109 other changes. ## Highlights @@ -154,7 +156,7 @@ Important Library updates include: `<` on `Option`. Significant development has been made in the verification APIs of `BitVec` -and fixed-width integer types (`IntX`), along with ongoing work to align +and fixed-width integer types (`IntX`), along with ongoing work to align `List/Array/Vector` APIs. Several lemmas about `Int.ediv/fdiv/tdiv` have been strengthened. @@ -208,9 +210,9 @@ _This summary of highlights was contributed by Violetta Sim._ * [#6947](https://github.com/leanprover/lean4/pull/6947) adds the `binderNameHint` gadget. It can be used in rewrite and simp rules to preserve a user-provided name where possible. - + The expression `binderNameHint v binder e` defined to be `e`. - + If it is used on the right-hand side of an equation that is applied by a tactic like `rw` or `simp`, and `v` is a local variable, and `binder` is an expression that (after beta-reduction) is a binder (so `fun w => …` or `∀ w, …`), @@ -956,7 +958,7 @@ The `grind` tactic is still is experimental and still under development. Avoid u **Breaking Change**: The semantic highlighting request handler is not a pure request handler anymore, but a stateful one. Notably, this means that clients - that extend the semantic highlighting of the Lean language server with the + that extend the semantic highlighting of the Lean language server with the `chainLspRequestHandler` function must now use the `chainStatefulLspRequestHandler` function instead. @@ -1131,4 +1133,4 @@ The `grind` tactic is still is experimental and still under development. Avoid u added this OLEAN_OUT to LEAN_PATH no oleans were found there and the search fell back to the stage1 installation location. 
-````` +```` diff --git a/Manual/Releases/v4.19.0.lean b/Manual/Releases/v4_19_0.lean similarity index 99% rename from Manual/Releases/v4.19.0.lean rename to Manual/Releases/v4_19_0.lean index cafa2362..0838a4e0 100644 --- a/Manual/Releases/v4.19.0.lean +++ b/Manual/Releases/v4_19_0.lean @@ -11,6 +11,8 @@ import Manual.Meta.Markdown open Manual open Verso.Genre +-- TODO: figure out why this is needed with the new compiler +set_option maxRecDepth 11000 #doc (Manual) "Lean 4.19.0 (2025-05-01)" => %%% @@ -18,7 +20,7 @@ tag := "release-v4.19.0" file := "v4.19.0" %%% -`````markdown +````markdown For this release, 420 changes landed. In addition to the 164 feature additions and 78 fixes listed below there were 13 refactoring changes, 29 documentation improvements, 31 performance improvements, 9 improvements to the test suite and 94 other changes. ## Highlights @@ -1303,4 +1305,4 @@ See the Library section below for details. `merge_remote.py` and `release_steps.py` scripts when needed. -````` +```` diff --git a/Manual/Releases/v4.1.0.lean b/Manual/Releases/v4_1_0.lean similarity index 99% rename from Manual/Releases/v4.1.0.lean rename to Manual/Releases/v4_1_0.lean index 7a7b9a9b..368f220c 100644 --- a/Manual/Releases/v4.1.0.lean +++ b/Manual/Releases/v4_1_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.1.0" file := "v4.1.0" %%% -`````markdown +```markdown * The error positioning on missing tokens has been [improved](https://github.com/leanprover/lean4/pull/2393). In particular, this should make it easier to spot errors in incomplete tactic proofs. * After elaborating a configuration file, Lake will now cache the configuration to a `lakefile.olean`. Subsequent runs of Lake will import this OLean instead of elaborating the configuration file. 
This provides a significant performance improvement (benchmarks indicate that using the OLean cuts Lake's startup time in half), but there are some important details to keep in mind: @@ -37,4 +37,4 @@ file := "v4.1.0" and this has been removed in favour of an additional field of `Rewrite.Config`. It was not previously accessible from user tactics. -````` +``` diff --git a/Manual/Releases/v4.20.0.lean b/Manual/Releases/v4_20_0.lean similarity index 99% rename from Manual/Releases/v4.20.0.lean rename to Manual/Releases/v4_20_0.lean index 4182dbd8..7526ce8f 100644 --- a/Manual/Releases/v4.20.0.lean +++ b/Manual/Releases/v4_20_0.lean @@ -11,6 +11,8 @@ import Manual.Meta.Markdown open Manual open Verso.Genre +-- TODO: figure out why this is needed with the new codegen +set_option maxRecDepth 9000 #doc (Manual) "Lean 4.20.0 (2025-06-02)" => %%% @@ -18,7 +20,7 @@ tag := "release-v4.20.0" file := "v4.20.0" %%% -`````markdown +````markdown For this release, 346 changes landed. In addition to the 108 feature additions and 85 fixes listed below there were 6 refactoring changes, 7 documentation improvements, 8 performance improvements, 4 improvements to the test suite and 126 other changes. ## Highlights @@ -1140,4 +1142,4 @@ Other notable library developments in this release include: the missing check. -````` +```` diff --git a/Manual/Releases/v4.20.1.lean b/Manual/Releases/v4_20_1.lean similarity index 96% rename from Manual/Releases/v4.20.1.lean rename to Manual/Releases/v4_20_1.lean index 8c4a58ce..9b886f0f 100644 --- a/Manual/Releases/v4.20.1.lean +++ b/Manual/Releases/v4_20_1.lean @@ -18,6 +18,6 @@ tag := "release-v4.20.1" file := "v4.20.1" %%% -`````markdown +```markdown The 4.20.1 point release addresses a metaprogramming regression in `Lean.Environment.addDeclCore` ([#8610](https://github.com/leanprover/lean4/pull/8610)). 
-````` +``` diff --git a/Manual/Releases/v4.21.0.lean b/Manual/Releases/v4_21_0.lean similarity index 99% rename from Manual/Releases/v4.21.0.lean rename to Manual/Releases/v4_21_0.lean index 27c7c033..1e728edc 100644 --- a/Manual/Releases/v4.21.0.lean +++ b/Manual/Releases/v4_21_0.lean @@ -12,14 +12,13 @@ open Manual open Verso.Genre -#doc (Manual) "Lean 4.21.0-rc3" => +#doc (Manual) "Lean 4.21.0 (2025-06-30)" => %%% tag := "release-v4.21.0" file := "v4.21.0" %%% -`````markdown -(These are the preliminary release notes for `v4.21.0-rc3`.) +````markdown For this release, 295 changes landed. In addition to the 100 feature additions and 83 fixes listed below there were 2 refactoring changes, 4 documentation improvements, 6 performance improvements, 2 improvements to the test suite and 98 other changes. @@ -330,7 +329,7 @@ _Other Highlights_ * [#8330](https://github.com/leanprover/lean4/pull/8330) improves support for structure extensionality in `grind`. It now uses eta expansion for structures instead of the extensionality theorems generated by `[ext]`. Examples: - + ```lean opaque f (a : Nat) : Nat × Bool @@ -458,7 +457,7 @@ _Other Highlights_ -- and non-choronological backtracking is used to avoid searching -- (2^8 - 1) irrelevant branches /-- - trace: + trace: [grind.split] p8 ∨ q8, generation: 0 [grind.split] p7 ∨ q7, generation: 0 [grind.split] p6 ∨ q6, generation: 0 @@ -897,8 +896,8 @@ _Other Highlights_ ## Server -* [#7665](https://github.com/leanprover/lean4/pull/7665) and [#8180](https://github.com/leanprover/lean4/pull/8180) add - support for code actions that resolve 'unknown identifier' errors by either importing the missing declaration or by +* [#7665](https://github.com/leanprover/lean4/pull/7665) and [#8180](https://github.com/leanprover/lean4/pull/8180) add + support for code actions that resolve 'unknown identifier' errors by either importing the missing declaration or by changing the identifier to one from the environment. 
* [#8091](https://github.com/leanprover/lean4/pull/8091) improves the performance of the workspace symbol request. @@ -948,4 +947,4 @@ _Other Highlights_ because it silently ignores the missing definition. -````` +```` diff --git a/Manual/Releases/v4_22_0.lean b/Manual/Releases/v4_22_0.lean new file mode 100644 index 00000000..5be3403e --- /dev/null +++ b/Manual/Releases/v4_22_0.lean @@ -0,0 +1,1409 @@ +/- +Copyright (c) 2025 Lean FRO LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Author: Anne Baanen +-/ + +import VersoManual + +import Manual.Meta.Markdown + +open Manual +open Verso.Genre + + +#doc (Manual) "Lean 4.22.0-rc3 (2025-07-04)" => +%%% +tag := "release-v4.22.0" +file := "v4.22.0" +%%% + +````markdown +For this release, 468 changes landed. In addition to the 185 feature additions and 85 fixes listed below there were 15 refactoring changes, 5 documentation improvements, 4 performance improvements, 0 improvements to the test suite and 174 other changes. + +## Language + +* [#6672](https://github.com/leanprover/lean4/pull/6672) filters out all declarations from `Lean.*`, `*.Tactic.*`, and + `*.Linter.*` from the results of `exact?` and `rw?`. + +* [#7395](https://github.com/leanprover/lean4/pull/7395) changes the `show t` tactic to match its documentation. + Previously it was a synonym for `change t`, but now it finds the first + goal that unifies with the term `t` and moves it to the front of the + goal list. + +* [#7639](https://github.com/leanprover/lean4/pull/7639) changes the generated `below` and `brecOn` implementations for + reflexive inductive types to support motives in `Sort u` rather than + `Type u`. + +* [#8337](https://github.com/leanprover/lean4/pull/8337) adjusts the experimental module system to not export any private + declarations from modules. 
+ +* [#8373](https://github.com/leanprover/lean4/pull/8373) enables transforming nondependent `let`s into `have`s in a + number of contexts: the bodies of nonrecursive definitions, equation + lemmas, smart unfolding definitions, and types of theorems. A motivation + for this change is that when zeta reduction is disabled, `simp` can only + effectively rewrite `have` expressions (e.g. `split` uses `simp` with + zeta reduction disabled), and so we cache the nondependence calculations + by transforming `let`s to `have`s. The transformation can be disabled + using `set_option cleanup.letToHave false`. + +* [#8387](https://github.com/leanprover/lean4/pull/8387) improves the error messages produced by `end` and prevents + invalid `end` commands from closing scopes on failure. + +* [#8419](https://github.com/leanprover/lean4/pull/8419) introduces an explicit `defeq` attribute to mark theorems that + can be used by `dsimp`. The benefit of an explicit attribute over the + prior logic of looking at the proof body is that we can reliably omit + theorem bodies across module boundaries. It also helps with intra-file + parallelism. + +* [#8519](https://github.com/leanprover/lean4/pull/8519) makes the equational theorems of non-exposed defs private. If + the author of a module chose not to expose the body of their function, + then they likely don't want that implementation to leak through + equational theorems. Helps with #8419. + +* [#8543](https://github.com/leanprover/lean4/pull/8543) adds typeclasses for `grind` to embed types into `Int`, for + cutsat. This allows, for example, treating `Fin n`, or Mathlib's `ℕ+` in + a uniform and extensible way. + +* [#8568](https://github.com/leanprover/lean4/pull/8568) modifies the `structure` elaborator to add local terminfo for + structure fields and explicit parent projections, enabling "go to + definition" when there are dependent fields. 
+ +* [#8574](https://github.com/leanprover/lean4/pull/8574) adds an additional diff mode to the error-message hint + suggestion widget that displays diffs per word rather than per + character. + +* [#8596](https://github.com/leanprover/lean4/pull/8596) makes `guard_msgs.diff=true` the default. The main usage of + `#guard_msgs` is for writing tests, and this makes staring at altered + test outputs considerably less tiring. + +* [#8609](https://github.com/leanprover/lean4/pull/8609) uses `grind` to shorten some proofs in the LRAT checker. The + intention is not particularly to improve the quality or maintainability + of these proofs (although hopefully this is a side effect), but just to + give `grind` a work out. + +* [#8619](https://github.com/leanprover/lean4/pull/8619) fixes an internalization (aka preprocessing) issue in `grind` + when applying injectivity theorems. + +* [#8621](https://github.com/leanprover/lean4/pull/8621) fixes a bug in the equality-resolution procedure used by + `grind`. + The procedure now performs a topological sort so that every simplified + theorem declaration is emitted **before** any place where it is + referenced. + Previously, applying equality resolution to + ```lean + h : ∀ x, p x a → ∀ y, p y b → x ≠ y + ``` + in the example + ```lean + example + (p : Nat → Nat → Prop) + (a b c : Nat) + (h : ∀ x, p x a → ∀ y, p y b → x ≠ y) + (h₁ : p c a) + (h₂ : p c b) : + False := by + grind + ``` + caused `grind` to produce the incorrect term + ```lean + p ?y a → ∀ y, p y b → False + ``` + The patch eliminates this error, and the following correct simplified + theorem is generated + ```lean + ∀ y, p y a → p y b → False + ``` + +* [#8622](https://github.com/leanprover/lean4/pull/8622) adds a test case / use case example for `grind`, setting up the + very basics of `IndexMap`, modelled on Rust's + [`indexmap`](https://docs.rs/indexmap/latest/indexmap/). It is not + intended as a complete implementation: just enough to exercise `grind`. 
+ +* [#8625](https://github.com/leanprover/lean4/pull/8625) improves the diagnostic information produced by `grind` when it + succeeds. We now include the list of case-splits performed, and the + number of application per function symbol. + +* [#8633](https://github.com/leanprover/lean4/pull/8633) implements case-split tracking in `grind`. The information is + displayed when `grind` fails or diagnostic information is requested. + Examples: + + - Failure + +* [#8637](https://github.com/leanprover/lean4/pull/8637) adds background theorems for normalizing `IntModule` expressions + using reflection. + +* [#8638](https://github.com/leanprover/lean4/pull/8638) improves the diagnostic information produced by `grind`. It now + sorts the equivalence classes by generation and then `Expr. lt`. + +* [#8639](https://github.com/leanprover/lean4/pull/8639) completes the `ToInt` family of typeclasses which `grind` will + use to embed types into the integers for `cutsat`. It contains instances + for the usual concrete data types (`Fin`, `UIntX`, `IntX`, `BitVec`), + and is extensible (e.g. for Mathlib's `PNat`). + +* [#8641](https://github.com/leanprover/lean4/pull/8641) adds the `#print sig $ident` variant of the `#print` command, + which omits the body. This is useful for testing meta-code, in the + ``` + #guard_msgs (drop trace, all) in #print sig foo + ``` + idiom. The benefit over `#check` is that it shows the declaration kind, + reducibility attributes (and in the future more built-in attributes, + like `@[defeq]` in #8419). (One downside is that `#check` shows unused + function parameter names, e.g. in induction principles; this could + probably be refined.) + +* [#8645](https://github.com/leanprover/lean4/pull/8645) adds many helper theorems for the future `IntModule` linear + arithmetic procedure in `grind`. + It also adds helper theorems for normalizing input atoms and support for + disequality in the new linear arithmetic procedure in `grind`. 
+ +* [#8650](https://github.com/leanprover/lean4/pull/8650) adds helper theorems for coefficient normalization and equality + detection. This theorems are for the linear arithmetic procedure in + `grind`. + +* [#8662](https://github.com/leanprover/lean4/pull/8662) adds a `warn.sorry` option (default true) that logs the + "declaration uses 'sorry'" warning when declarations contain `sorryAx`. + When false, the warning is not logged. + +* [#8670](https://github.com/leanprover/lean4/pull/8670) adds helper theorems that will be used to interface the + `CommRing` module with the linarith procedure in `grind`. + +* [#8671](https://github.com/leanprover/lean4/pull/8671) allow structures to have non-bracketed binders, making it + consistent with `inductive`. + +* [#8677](https://github.com/leanprover/lean4/pull/8677) adds the basic infrastructure for the linarith module in + `grind`. + +* [#8680](https://github.com/leanprover/lean4/pull/8680) adds the `reify?` and `denoteExpr` for the new linarith module + in `grind`. + +* [#8682](https://github.com/leanprover/lean4/pull/8682) uses the `CommRing` module to normalize linarith inequalities. + +* [#8687](https://github.com/leanprover/lean4/pull/8687) implements the infrastructure for constructing proof terms in + the linarith procedure in `grind`. It also adds the `ToExpr` instances + for the reified objects. + +* [#8689](https://github.com/leanprover/lean4/pull/8689) implements proof term generation for the `CommRing` and + `linarith` interface. It also fixes the `CommRing` helper theorems. + +* [#8690](https://github.com/leanprover/lean4/pull/8690) implements the main framework of the model search procedure for + the linarith component in grind. It currently handles only inequalities. 
+ It can already solve simple goals such as + ```lean + example [IntModule α] [Preorder α] [IntModule.IsOrdered α] (a b c : α) + : a < b → b < c → c < a → False := by + grind + +* [#8693](https://github.com/leanprover/lean4/pull/8693) fixes the denotation functions used to interface the ring and + linarith modules in grind. + +* [#8694](https://github.com/leanprover/lean4/pull/8694) implements special support for `One.one` in linarith when the + structure is a ordered ring. It also fixes bugs during initialization. + +* [#8697](https://github.com/leanprover/lean4/pull/8697) implements support for inequalities in the `grind` linear + arithmetic procedure and simplifies its design. Some examples that can + already be solved: + ```lean + open Lean.Grind + example [IntModule α] [Preorder α] [IntModule.IsOrdered α] (a b c d : α) + : a + d < c → b = a + (2:Int)*d → b - d > c → False := by + grind + +* [#8708](https://github.com/leanprover/lean4/pull/8708) fixes an internalization bug in the interface between linarith + and ring modules in `grind`. The `CommRing` module may create new terms + during normalization. + +* [#8713](https://github.com/leanprover/lean4/pull/8713) fixes a bug in the commutative ring module used in `grind`. It + was missing simplification opportunities. + +* [#8715](https://github.com/leanprover/lean4/pull/8715) implements the basic infrastructure for processing disequalities + in the `grind linarith` module. We still have to implement backtracking. + +* [#8723](https://github.com/leanprover/lean4/pull/8723) implements a `finally` section following a (potentially empty) + `where` block. `where ... finally` opens a tactic sequence block in + which the goals are the unassigned metavariables from the definition + body and its auxiliary definitions that arise from use of `let rec` and + `where`. + +* [#8730](https://github.com/leanprover/lean4/pull/8730) adds support for throwing named errors with associated error + explanations. 
In particular, it adds elaborators for the syntax defined + in #8649, which use the error-explanation infrastructure added in #8651. + This includes completions, hovers, and jump-to-definition for error + names. + +* [#8733](https://github.com/leanprover/lean4/pull/8733) implements disequality splitting and non-chronological + backtracking for the `grind` linarith procedure. + ```lean + example [IntModule α] [LinearOrder α] [IntModule.IsOrdered α] (a b c d : α) + : a ≤ b → a - c ≥ 0 + d → d ≤ 0 → d ≥ 0 → b = c → a ≠ b → False := by + grind + ``` + +* [#8751](https://github.com/leanprover/lean4/pull/8751) adds the `nondep` field of `Expr.letE` to the C++ data model. + Previously this field has been unused, and in followup PRs the + elaborator will use it to encode `have` expressions (non-dependent + `let`s). The kernel does not verify that `nondep` is correctly applied + during typechecking. The `letE` delaborator now prints `have`s when + `nondep` is true, though `have` still elaborates as `letFun` for now. + Breaking change: `Expr.updateLet!` is renamed to `Expr.updateLetE!`. + +* [#8753](https://github.com/leanprover/lean4/pull/8753) fixes a bug in `simp` where it was not resetting the set of + zeta-delta reduced let definitions between `simp` calls. It also fixes a + bug where `simp` would report zeta-delta reduced let definitions that + weren't given as simp arguments (these extraneous let definitions appear + due to certain processes temporarily setting `zetaDelta := true`). This + PR also modifies the metaprogramming interface for the zeta-delta + tracking functions to be re-entrant and to prevent this kind of no-reset + bug from occurring again. Closes #6655. + +* [#8756](https://github.com/leanprover/lean4/pull/8756) implements counterexamples for grind linarith. 
Example: + ```lean + example [CommRing α] [LinearOrder α] [Ring.IsOrdered α] (a b c d : α) + : b ≥ 0 → c > b → d > b → a ≠ b + c → a > b + c → a < b + d → False := by + grind + ``` + produces the counterexample + ``` + a := 7/2 + b := 1 + c := 2 + d := 3 + ``` + +* [#8759](https://github.com/leanprover/lean4/pull/8759) implements model-based theory combination for grind linarith. + Example: + ```lean + example [CommRing α] [LinearOrder α] [Ring.IsOrdered α] (f : α → α → α) (x y z : α) + : z ≤ x → x ≤ 1 → z = 1 → f x y = 2 → f 1 y = 2 := by + grind + ``` + +* [#8763](https://github.com/leanprover/lean4/pull/8763) corrects the handling of explicit `monotonicity` proofs for + mutual `partial_fixpoint` definitions. + +* [#8773](https://github.com/leanprover/lean4/pull/8773) implements support for the heterogeneous `(k : Nat) * (a : R)` + in ordered modules. Example: + ```lean + variable (R : Type u) [IntModule R] [LinearOrder R] [IntModule.IsOrdered R] + +* [#8774](https://github.com/leanprover/lean4/pull/8774) adds an option for disabling the cutsat procedure in `grind`. + The linarith module takes over linear integer/nat constraints. Example: + + ```lean + set_option trace.grind.cutsat.assert true in -- cutsat should **not** process the following constraints + example (x y z : Int) (h1 : 2 * x < 3 * y) (h2 : -4 * x + 2 * z < 0) : ¬ 12*y - 4* z < 0 := by + grind -cutsat -- `linarith` module solves it + ``` + +* [#8775](https://github.com/leanprover/lean4/pull/8775) adds a `grind` normalization theorem for `Int.negSucc`. Example: + + ```lean + example (p : Int) (n : Nat) (hmp : Int.negSucc (n + 1) + 1 = p) + (hnm : Int.negSucc (n + 1 + 1) + 1 = Int.negSucc (n + 1)) : p = Int.negSucc n := by + grind + ``` + +* [#8776](https://github.com/leanprover/lean4/pull/8776) ensures that user provided `natCast` application are properly + internalized in the grind cutsat module. 
+ +* [#8777](https://github.com/leanprover/lean4/pull/8777) implements basic `Field` support in the commutative ring module + in `grind`. It is just division by numerals for now. Examples: + ```lean + open Lean Grind + +* [#8780](https://github.com/leanprover/lean4/pull/8780) makes Lean code generation respect the module name provided + through `lean --setup`. + +* [#8786](https://github.com/leanprover/lean4/pull/8786) improves the support for fields in `grind`. New supported + examples: + ```lean + example [Field α] [IsCharP α 0] (x : α) : x ≠ 0 → (4 / x)⁻¹ * ((3 * x^3) / x)^2 * ((1 / (2 * x))⁻¹)^3 = 18 * x^8 := by grind + example [Field α] (a : α) : 2 * a ≠ 0 → 1 / a + 1 / (2 * a) = 3 / (2 * a) := by grind + example [Field α] [IsCharP α 0] (a : α) : 1 / a + 1 / (2 * a) = 3 / (2 * a) := by grind + example [Field α] [IsCharP α 0] (a b : α) : 2*b - a = a + b → 1 / a + 1 / (2 * a) = 3 / b := by grind + example [Field α] [NoNatZeroDivisors α] (a : α) : 1 / a + 1 / (2 * a) = 3 / (2 * a) := by grind + example [Field α] {x y z w : α} : x / y = z / w → y ≠ 0 → w ≠ 0 → x * w = z * y := by grind + example [Field α] (a : α) : a = 0 → a ≠ 1 := by grind + example [Field α] (a : α) : a = 0 → a ≠ 1 - a := by grind + ``` + +* [#8789](https://github.com/leanprover/lean4/pull/8789) implements the Rabinowitsch transformation for `Field` + disequalities in `grind`. For example, this transformation is necessary + for solving: + ```lean + example [Field α] (a : α) : a^2 = 0 → a = 0 := by + grind + ``` + +* [#8791](https://github.com/leanprover/lean4/pull/8791) ensures the `grind linarith` module is activated for any type + that implements only `IntModule`. That is, the type does not need to be + a preorder anymore. + +* [#8792](https://github.com/leanprover/lean4/pull/8792) makes the `clear_value` tactic preserve the order of variables + in the local context. 
This is done by adding + `Lean.MVarId.withRevertedFrom`, which reverts all local variables + starting from a given variable, rather than only the ones that depend on + it. + +* [#8794](https://github.com/leanprover/lean4/pull/8794) adds a module `Lean.Util.CollectLooseBVars` with a function + `Expr.collectLooseBVars` that collects the set of loose bound variables + in an expression. That is, it computes the set of all `i` such that + `e.hasLooseBVar i` is true. + +* [#8795](https://github.com/leanprover/lean4/pull/8795) ensures that auxliary terms are not internalized by the ring and + linarith modules. + +* [#8796](https://github.com/leanprover/lean4/pull/8796) fixes `grind linarith` term internalization and support for + `HSMul`. + +* [#8798](https://github.com/leanprover/lean4/pull/8798) adds the following instance + ``` + instance [Field α] [LinearOrder α] [Ring.IsOrdered α] : IsCharP α 0 + ``` + The goal is to ensure we do not perform unnecessary case-splits in our + test suite. + +* [#8804](https://github.com/leanprover/lean4/pull/8804) implements first-class support for nondependent let expressions + in the elaborator; recall that a let expression `let x : t := v; b` is + called *nondependent* if `fun x : t => b` typechecks, and the notation + for a nondependent let expression is `have x := v; b`. Previously we + encoded `have` using the `letFun` function, but now we make use of the + `nondep` flag in the `Expr.letE` constructor for the encoding. This has + been given full support throughout the metaprogramming interface and the + elaborator. Key changes to the metaprogramming interface: + - Local context `ldecl`s with `nondep := true` are generally treated as + `cdecl`s. This is because in the body of a `have` expression the + variable is opaque. Functions like `LocalDecl.isLet` by default return + `false` for nondependent `ldecl`s. 
In the rare case where it is needed, + they take an additional optional `allowNondep : Bool` flag (defaults to + `false`) if the variable is being processed in a context where the value + is relevant. + - Functions such as `mkLetFVars` by default generalize nondependent let + variables and create lambda expressions for them. The + `generalizeNondepLet` flag (default true) can be set to false if `have` + expressions should be produced instead. **Breaking change:** Uses of + `letLambdaTelescope`/`mkLetFVars` need to use `generalizeNondepLet := + false`. See the next item. + - There are now some mapping functions to make telescoping operations + more convenient. See `mapLetTelescope` and `mapLambdaLetTelescope`. + There is also `mapLetDecl` as a counterpart to `withLetDecl` for + creating `let`/`have` expressions. + - Important note about the `generalizeNondepLet` flag: it should only be + used for variables in a local context that the metaprogram "owns". Since + nondependent let variables are treated as constants in most cases, the + `value` field might refer to variables that do not exist, if for example + those variables were cleared or reverted. Using `mapLetDecl` is always + fine. + - The simplifier will cache its let dependence calculations in the + nondep field of let expressions. + - The `intro` tactic still produces *dependent* local variables. Given + that the simplifier will transform lets into haves, it would be + surprising if that would prevent `intro` from creating a local variable + whose value cannot be used. + +* [#8809](https://github.com/leanprover/lean4/pull/8809) introduces the basic theory of ordered modules over Nat (i.e. + without subtraction), for `grind`. We'll solve problems here by + embedding them in the `IntModule` envelope. + +* [#8810](https://github.com/leanprover/lean4/pull/8810) implements equality elimination in `grind linarith`. 
The current + implementation supports only `IntModule` and `IntModule` + + `NoNatZeroDivisors` + +* [#8813](https://github.com/leanprover/lean4/pull/8813) adds some basic lemmas about `grind` internal notions of + modules. + +* [#8815](https://github.com/leanprover/lean4/pull/8815) refactors the way simp arguments are elaborated: Instead of + changing the `SimpTheorems` structure as we go, this elaborates each + argument to a more declarative description of what it does, and then + apply those. This enables more interesting checks of simp arguments that + need to happen in the context of the eventually constructed simp context + (the checks in #8688), or after simp has run (unused argument linter + #8901). + +* [#8828](https://github.com/leanprover/lean4/pull/8828) extends the experimental module system to support resolving + private names imported (transitively) through `import all`. + +* [#8835](https://github.com/leanprover/lean4/pull/8835) defines the embedding of a `CommSemiring` into its `CommRing` + envelope, injective when the `CommSemiring` is cancellative. This will + be used by `grind` to prove results in `Nat`. + +* [#8836](https://github.com/leanprover/lean4/pull/8836) generalizes #8835 to the noncommutative case, allowing us to + embed a `Lean.Grind.Semiring` into a `Lean.Grind.Ring`. + +* [#8845](https://github.com/leanprover/lean4/pull/8845) implements the proof-by-reflection infrastructure for embedding + semiring terms as ring ones. + +* [#8847](https://github.com/leanprover/lean4/pull/8847) relaxes the assumptions for `Lean.Grind.IsCharP` from `Ring` to + `Semiring`, and provides an alternative constructor for rings. 
+ +* [#8848](https://github.com/leanprover/lean4/pull/8848) generalizes the internal `grind` instance + ``` + instance [Field α] [LinearOrder α] [Ring.IsOrdered α] : IsCharP α 0 + ``` + to + ``` + instance [Ring α] [Preorder α] [Ring.IsOrdered α] : IsCharP α 0 + ``` + +* [#8855](https://github.com/leanprover/lean4/pull/8855) refactors `Lean.Grind.NatModule/IntModule/Ring.IsOrdered`. + +* [#8859](https://github.com/leanprover/lean4/pull/8859) shows the equivalence between `Lean.Grind.NatModule.IsOrdered` + and `Lean.Grind.IntModule.IsOrdered` over an `IntModule`. + +* [#8865](https://github.com/leanprover/lean4/pull/8865) allows `simp` to recognize and warn about simp lemmas that are + likely looping in the current simp set. It does so automatically + whenever simplification fails with the dreaded “max recursion depth” + error fails, but it can be made to do it always with `set_option + linter.loopingSimpArgs true`. This check is not on by default because it + is somewhat costly, and can warn about simp calls that still happen to + work. + +* [#8874](https://github.com/leanprover/lean4/pull/8874) skips attempting to compute a module name from the file name and + root directory (i.e., `lean -R`) if a name is already provided via `lean + --setup`. + +* [#8880](https://github.com/leanprover/lean4/pull/8880) makes `simp` consult its own cache more often, to avoid + replicating work. + +* [#8882](https://github.com/leanprover/lean4/pull/8882) adds `@[expose]` annotations to terms that appear in `grind` + proof certificates, so `grind` can be used in the module system. It's + possible/likely that I haven't identified all of them yet. + +* [#8890](https://github.com/leanprover/lean4/pull/8890) adds doc-strings to the `Lean.Grind` algebra typeclasses, as + these will appear in the reference manual explaining how to extend + `grind` algebra solvers to new types. Also removes some redundant + fields. 
+ +* [#8892](https://github.com/leanprover/lean4/pull/8892) corrects the pretty printing of `grind` modifiers. Previously + `@[grind →]` was being pretty printed as `@[grind→ ]` (Space on the + right of the symbol, rather than left.) This fixes the pretty printing + of attributes, and preserves the presence of spaces after the symbol in + the output of `grind?`. + +* [#8893](https://github.com/leanprover/lean4/pull/8893) fixes a bug in the `dvd` propagation function in cutsat. + +* [#8901](https://github.com/leanprover/lean4/pull/8901) adds a linter (`linter.unusedSimpArgs`) that complains when a + simp argument (`simp [foo]`) is unused. It should do the right thing if + the `simp` invocation is run multiple times, e.g. inside `all_goals`. It + does not trigger when the `simp` call is inside a macro. The linter + message contains a clickable hint to remove the simp argument. + +* [#8903](https://github.com/leanprover/lean4/pull/8903) make sure that the local instance cache calculation applies more + reductions. In #2199 there was an issue where metavariables could + prevent local variables from being considered as local instances. We use + a slightly different approach that ensures that, for example, `let`s at + the ends of telescopes do not cause similar problems. These reductions + were already being calculated, so this does not require any additional + work to be done. + +* [#8909](https://github.com/leanprover/lean4/pull/8909) refactors the `NoNatZeroDivisors` to make sure it will work with + the new `Semiring` support. + +* [#8910](https://github.com/leanprover/lean4/pull/8910) adds the `NoNatZeroDivisors` instance for `OfSemiring.Q α` + +* [#8913](https://github.com/leanprover/lean4/pull/8913) cleans up `grind`'s internal order typeclasses, removing + unnecessary duplication. + +* [#8914](https://github.com/leanprover/lean4/pull/8914) modifies `let` and `have` term syntaxes to be consistent with + each other. 
Adds configuration options; for example, `have` is + equivalent to `let +nondep`, for *nondependent* lets. Other options + include `+usedOnly` (for `let_tmp`), `+zeta` (for `letI`/`haveI`), and + `+postponeValue` (for `let_delayed)`. There is also `let (eq := h) x := + v; b` for introducing `h : x = v` when elaborating `b`. The `eq` option + works for pattern matching as well, for example `let (eq := h) (x, y) := + p; b`. + +* [#8918](https://github.com/leanprover/lean4/pull/8918) fixes the `guard_msgs.diff` default behavior so that the default + specified in the option definition is actually used everywhere. + +* [#8921](https://github.com/leanprover/lean4/pull/8921) implements support for (commutative) semirings in `grind`. It + uses the Grothendieck completion to construct a (commutative) ring + `Lean.Grind.Ring.OfSemiring.Q α` from a (commutative) semiring `α`. This + construction is mostly useful for semirings that implement + `AddRightCancel α`. Otherwise, the function `toQ` is not injective. + Examples: + ```lean + example (x y : Nat) : x^2*y = 1 → x*y^2 = y → y*x = 1 := by + grind + +* [#8935](https://github.com/leanprover/lean4/pull/8935) adds the `+generalize` option to the `let` and `have` syntaxes. + For example, `have +generalize n := a + b; body` replaces all instances + of `a + b` in the expected type with `n` when elaborating `body`. This + can be likened to a term version of the `generalize` tactic. One can + combine this with `eq` in `have +generalize (eq := h) n := a + b; body` + as an analogue of `generalize h : n = a + b`. + +* [#8937](https://github.com/leanprover/lean4/pull/8937) changes the output universe of the generated `below` + implementation for non-reflexive inductive types to match the + implementation for reflexive inductive types in #7639. 
+ +* [#8940](https://github.com/leanprover/lean4/pull/8940) introduces antitonicity lemmas that support the elaboration of + mixed inductive-coinductive predicates defined using the + `least_fixpoint` / `greatest_fixpoint` constructs. + +* [#8943](https://github.com/leanprover/lean4/pull/8943) adds helper theorems for normalizing semirings that do not + implement `AddRightCancel`. + +* [#8953](https://github.com/leanprover/lean4/pull/8953) implements support for normalization for commutative semirings + that do not implement `AddRightCancel`. Examples: + ```lean + variable (R : Type u) [CommSemiring R] + +* [#8954](https://github.com/leanprover/lean4/pull/8954) adds a procedure that efficiently transforms `let` expressions + into `have` expressions (`Meta.letToHave`). This is exposed as the + `let_to_have` tactic. + +* [#8955](https://github.com/leanprover/lean4/pull/8955) fixes `Lean.MVarId.deltaLocalDecl`, which previously replaced + the local definition with the target. + +* [#8957](https://github.com/leanprover/lean4/pull/8957) adds configuration options to the `let`/`have` tactic syntaxes. + For example, `let (eq := h) x := v` adds `h : x = v` to the local + context. The configuration options are the same as those for the + `let`/`have` term syntaxes. + +* [#8958](https://github.com/leanprover/lean4/pull/8958) improves the case splitting strategy used in `grind`, and + ensures `grind` also considers simple `match`-conditions for + case-splitting. Example: + + ```lean + example (x y : Nat) + : 0 < match x, y with + | 0, 0 => 1 + | _, _ => x + y := by -- x or y must be greater than 0 + grind + ``` + +* [#8959](https://github.com/leanprover/lean4/pull/8959) add instances showing that the Grothendieck (i.e. additive) + envelope of a semiring is an ordered ring if the original semiring is + ordered (and satisfies ExistsAddOfLE), and in this case the embedding is + monotone. 
+ +* [#8963](https://github.com/leanprover/lean4/pull/8963) embeds a NatModule into its IntModule completion, which is + injective when we have AddLeftCancel, and monotone when the modules are + ordered. Also adds some (failing) grind test cases that can be verified + once `grind` uses this embedding. + +* [#8964](https://github.com/leanprover/lean4/pull/8964) adds `@[expose]` attributes to proof terms constructed by + `grind` that need to be evaluated in the kernel. + +* [#8965](https://github.com/leanprover/lean4/pull/8965) revises @[grind] annotations on Nat bitwise operations. + +* [#8968](https://github.com/leanprover/lean4/pull/8968) adds the following features to `simp`: + - A routine for simplifying `have` telescopes in a way that avoids + quadratic complexity arising from locally nameless expression + representations, like what #6220 did for `letFun` telescopes. + Furthermore, simp converts `letFun`s into `have`s (nondependent lets), + and we remove the #6220 routine since we are moving away from `letFun` + encodings of nondependent lets. + - A `+letToHave` configuration option (enabled by default) that converts + lets into haves when possible, when `-zeta` is set. Previously Lean + would need to do a full typecheck of the bodies of `let`s, but the + `letToHave` procedure can skip checking some subexpressions, and it + modifies the `let`s in an entire expression at once rather than one at a + time. + - A `+zetaHave` configuration option, to turn off zeta reduction of + `have`s specifically. The motivation is that dependent `let`s can only + be dsimped by let, so zeta reducing just the dependent lets is a + reasonable way to make progress. The `+zetaHave` option is also added to + the meta configuration. + - When `simp` is zeta reducing, it now uses an algorithm that avoids + complexity quadratic in the depth of the let telescope. 
+ - Additionally, the zeta reduction routines in `simp`, `whnf`, and + `isDefEq` now all are consistent with how they apply the `zeta`, + `zetaHave`, and `zetaUnused` configurations. + +* [#8971](https://github.com/leanprover/lean4/pull/8971) fixes `linter.simpUnusedSimpArgs` to check the syntax kind, to + not fire on `simp` calls behind macros. Fixes #8969 + +* [#8973](https://github.com/leanprover/lean4/pull/8973) refactors the juggling of universes in the linear + `noConfusionType` construction: Instead of using `PUnit.{…} → ` + to get the branches of `withCtorType` to the same universe level, we use + `PULift`. + +* [#8978](https://github.com/leanprover/lean4/pull/8978) updates the `solveMonoStep` function used in the `monotonicity` + tactic to check for definitional equality between the current goal and + the monotonicity proof obtained from a recursive call. This ensures + soundness by preventing incorrect applications when + `Lean.Order.PartialOrder` instances differ—an issue that can arise with + `mutual` blocks defined using the `partial_fixpoint` keyword, where + different `Lean.Order.CCPO` structures may be involved. + +* [#8980](https://github.com/leanprover/lean4/pull/8980) improves the consistency of error message formatting by + rendering addenda of several existing error messages as labeled notes + and hints. + +* [#8983](https://github.com/leanprover/lean4/pull/8983) fixes a bug in congruence proof generation in `grind` for + over-applied functions. + +* [#8986](https://github.com/leanprover/lean4/pull/8986) improves the error messages produced by invalid projections and + field notation. It also adds a hint to the "function expected" error + message noting the argument to which the term is being applied, which + can be helpful for debugging spurious "function expected" messages + actually caused by syntax errors. + +* [#8991](https://github.com/leanprover/lean4/pull/8991) adds some missing `ToInt.X` typeclass instances for `grind`.
+ +* [#8995](https://github.com/leanprover/lean4/pull/8995) introduces a Hoare logic for monadic programs in + `Std.Do.Triple`, and assorted tactics: + + * `mspec` for applying Hoare triple specifications + * `mvcgen` to turn a Hoare triple proof obligation `⦃P⦄ prog ⦃Q⦄` into + pure verification conditoins (i.e., without any traces of Hoare triples + or weakest preconditions reminiscent of `prog`). The resulting + verification conditions in the stateful logic of `Std.Do.SPred` can be + discharged manually with the tactics coming with its custom proof mode + or with automation such as `simp` and `grind`. + +* [#8996](https://github.com/leanprover/lean4/pull/8996) provides the remaining instances for the `Lean.Grind.ToInt` + typeclasses. + +* [#9004](https://github.com/leanprover/lean4/pull/9004) ensures that type-class synthesis failure errors in interpolated + strings are displayed at the interpolant at which they occurred. + +* [#9005](https://github.com/leanprover/lean4/pull/9005) changes the definition of `Lean.Grind.ToInt.OfNat`, introducing + a `wrap` on the right-hand-side. + +* [#9008](https://github.com/leanprover/lean4/pull/9008) implements the basic infrastructure for the generic `ToInt` + support in `cutsat`. + +* [#9022](https://github.com/leanprover/lean4/pull/9022) completes the generic `toInt` infrastructure for embedding terms + implementing the `ToInt` type classes into `Int`. + +* [#9026](https://github.com/leanprover/lean4/pull/9026) implements support for (non strict) `ToInt` inequalities in + `grind cutsat`. `grind cutsat` can solve simple problems such as: + ```lean + example (a b c : Fin 11) : a ≤ b → b ≤ c → a ≤ c := by + grind + +* [#9030](https://github.com/leanprover/lean4/pull/9030) fixes a couple of bootstrapping-related hiccups in the newly + added `Std.Do` module. 
More precisely, + +* [#9035](https://github.com/leanprover/lean4/pull/9035) extends the list of acceptable characters to all the French ones + as well as some others, + by adding characters from the Latin-1-Supplement and Latin-Extended-A + unicode block. + +* [#9038](https://github.com/leanprover/lean4/pull/9038) adds test cases for the VC generator and implements a few small + and tedious fixes to ensure they pass. + +* [#9041](https://github.com/leanprover/lean4/pull/9041) makes `mspec` detect more viable assignments by `rfl` instead of + generating a VC. + +* [#9044](https://github.com/leanprover/lean4/pull/9044) adjusts the experimental module system to make `private` the + default visibility modifier in `module`s, introducing `public` as a new + modifier instead. `public section` can be used to revert the default for + an entire section, though this is more intended to ease gradual adoption + of the new semantics such as in `Init` (and soon `Std`) where they + should be replaced by a future decl-by-decl re-review of visibilities. + +* [#9045](https://github.com/leanprover/lean4/pull/9045) fixes a type error in `mvcgen` and makes it turn fewer natural + goals into synthetic opaque ones, so that tactics such as `trivial` may + instantiate them more easily. + +* [#9048](https://github.com/leanprover/lean4/pull/9048) implements support for strict inequalities in the `ToInt` + adapter used in `grind cutsat`. Example: + ```lean + example (a b c : Fin 11) : c ≤ 9 → a ≤ b → b < c → a < c + 1 := by + grind + ``` + +* [#9050](https://github.com/leanprover/lean4/pull/9050) ensures the `ToInt` bounds are asserted for every `toInt a` + application internalized in `grind cutsat`. + +* [#9051](https://github.com/leanprover/lean4/pull/9051) implements support for equalities and disequalities in `grind + cutsat`. We still have to improve the encoding.
Examples: + ```lean + example (a b c : Fin 11) : a ≤ 2 → b ≤ 3 → c = a + b → c ≤ 5 := by + grind + +* [#9057](https://github.com/leanprover/lean4/pull/9057) introduces a simple variable-reordering heuristic for `cutsat`. + It is needed by the `ToInt` adapter to support finite types such as + `UInt64`. The current encoding into `Int` produces large coefficients, + which can enlarge the search space when an unfavorable variable order is + used. Example: + ```lean + example (a b c : UInt64) : a ≤ 2 → b ≤ 3 → c - a - b = 0 → c ≤ 5 := by + grind + ``` + +* [#9059](https://github.com/leanprover/lean4/pull/9059) adds helper theorems for normalizing coefficients in rings of + unknown characteristic. + +* [#9062](https://github.com/leanprover/lean4/pull/9062) implements support for equations ` = 0` in rings and fields + of unknown characteristic. Examples: + ```lean + example [Field α] (a : α) : (2 * a)⁻¹ = a⁻¹ / 2 := by grind + +* [#9065](https://github.com/leanprover/lean4/pull/9065) improves the counterexamples produced by the `cutsat` procedure + in `grind` when using the `ToInt` gadget. + +* [#9067](https://github.com/leanprover/lean4/pull/9067) adds a docstring for the `grind` tactic. + +* [#9069](https://github.com/leanprover/lean4/pull/9069) implements support for the type class `LawfulEqCmp`. Examples: + ```lean + example (a b c : Vector (List Nat) n) + : b = c → a.compareLex (List.compareLex compare) b = o → o = .eq → a = c := by + grind + +* [#9073](https://github.com/leanprover/lean4/pull/9073) copies #9069 to handle `ReflCmp` the same way; we need to call + this in propagateUp rather than propagateDown. + +* [#9074](https://github.com/leanprover/lean4/pull/9074) uses the commutative ring module to normalize nonlinear + polynomials in `grind cutsat`. 
Examples: + ```lean + example (a b : Nat) (h₁ : a + 1 ≠ a * b * a) (h₂ : a * a * b ≤ a + 1) : b * a^2 < a + 1 := by + grind + +* [#9076](https://github.com/leanprover/lean4/pull/9076) adds an unexpander for `OfSemiring.toQ`. This is an auxiliary + function used by the `ring` module in `grind`, but we want to reduce the + clutter in the diagnostic information produced by `grind`. Example: + ``` + example [CommSemiring α] [AddRightCancel α] [IsCharP α 0] (x y : α) + : x^2*y = 1 → x*y^2 = y → x + y = 2 → False := by + grind + ``` + produces + ``` + [ring] Ring `Ring.OfSemiring.Q α` ▼ + [basis] Basis ▼ + [_] ↑x + ↑y + -2 = 0 + [_] ↑y + -1 = 0 + ``` + +* [#9086](https://github.com/leanprover/lean4/pull/9086) deprecates `let_fun` syntax in favor of `have` and removes + `letFun` support from WHNF and `simp`. + +* [#9087](https://github.com/leanprover/lean4/pull/9087) removes the `irreducible` attribute from `letFun`, which is one + step toward removing special `letFun` support; part of #9086. + +```` +````markdown + +## Library + +* [#8003](https://github.com/leanprover/lean4/pull/8003) adds a new monadic interface for `Async` operations. + +* [#8072](https://github.com/leanprover/lean4/pull/8072) adds DNS functions to the standard library + +* [#8109](https://github.com/leanprover/lean4/pull/8109) adds system information functions to the standard library + +* [#8178](https://github.com/leanprover/lean4/pull/8178) provides a compact formula for the MSB of the sdiv. Most of the + work in the PR involves handling the corner cases of division + overflowing (e.g. `intMin / -1 = intMin`) + +* [#8203](https://github.com/leanprover/lean4/pull/8203) adds trichotomy lemmas for unsigned and signed comparisons, + stating that only one of three cases may happen: either `x < y`, `x = + y`, or `x > y` (for both signed and unsigned comparisons). We use + explicit arguments so that users can write `rcases slt_trichotomy x y + with hlt | heq | hgt`.
+ +* [#8205](https://github.com/leanprover/lean4/pull/8205) adds a simp lemma that simplifies T-division where the numerator + is a `Nat` into an E-division: + + + ```lean + @[simp] theorem ofNat_tdiv_eq_ediv {a : Nat} {b : Int} : (a : Int).tdiv b = a / b := + tdiv_eq_ediv_of_nonneg (by simp) + ``` + +* [#8210](https://github.com/leanprover/lean4/pull/8210) adds an equivalence relation to tree maps akin to the existing + one for hash maps. In order to get many congruence lemmas to eventually + use for defining functions on extensional tree maps, almost all of the + remaining tree map functions have also been given lemmas to relate them + to list functions, although these aren't currently used to prove lemmas + other than congruence lemmas. + +* [#8253](https://github.com/leanprover/lean4/pull/8253) adds `toInt_smod` and auxilliary lemmas necessary for its proof + (`msb_intMin_umod_neg_of_msb_true`, + `msb_neg_umod_neg_of_msb_true_of_msb_true`, `toInt_dvd_toInt_iff`, + `toInt_dvd_toInt_iff_of_msb_true_msb_false`, + `toInt_dvd_toInt_iff_of_msb_false_msb_true`, + `neg_toInt_neg_umod_eq_of_msb_true_msb_true`, `toNat_pos_of_ne_zero`, + `toInt_umod_neg_add`, `toInt_sub_neg_umod` and + `BitVec.[lt_of_msb_false_of_msb_true, msb_umod_of_msb_false_of_ne_zero`, + `neg_toInt_neg]`) + +* [#8420](https://github.com/leanprover/lean4/pull/8420) provides the iterator combinator `drop` that transforms any + iterator into one that drops the first `n` elements. + +* [#8534](https://github.com/leanprover/lean4/pull/8534) fixes `IO.FS.realPath` on windows to take symbolic links into + account. + +* [#8545](https://github.com/leanprover/lean4/pull/8545) provides the means to reason about "equivalent" iterators. + Simply speaking, two iterators are equivalent if they behave the same as + long as consumers do not introspect their states. 
+ +* [#8546](https://github.com/leanprover/lean4/pull/8546) adds a new `BitVec.clz` operation and a corresponding `clz` + circuit to `bv_decide`, allowing to bitblast the count leading zeroes + operation. The AIG circuit is linear in the number of bits of the + original expression, making the bitblasting convenient wrt. rewriting. + `clz` is common in numerous compiler intrinsics (see + [here](https://clang.llvm.org/docs/LanguageExtensions.html#intrinsics-support-within-constant-expressions)) + and architectures (see + [here](https://en.wikipedia.org/wiki/Find_first_set)). + +* [#8573](https://github.com/leanprover/lean4/pull/8573) avoids the likely unexpected behavior of `removeDirAll` to + delete through symlinks and adds the new function + `IO.FS.symlinkMetadata`. + +* [#8585](https://github.com/leanprover/lean4/pull/8585) makes the lemma `BitVec.extractLsb'_append_eq_ite` more usable + by using the "simple case" more often, and uses this simplification to + make `BitVec.extractLsb'_append_eq_of_add_lt` stronger, renaming it to + `BitVec.extractLsb'_append_eq_of_add_le`. + +* [#8587](https://github.com/leanprover/lean4/pull/8587) adjusts the grind annotation on + `Std.HashMap.map_fst_toList_eq_keys` and variants, so `grind` can reason + bidirectionally between `m.keys` and `m.toList`. + +* [#8590](https://github.com/leanprover/lean4/pull/8590) adds `@[grind]` to `getElem?_pos` and variants. + +* [#8615](https://github.com/leanprover/lean4/pull/8615) provides a special empty iterator type. Although its behavior + can be emulated with a list iterator (for example), having a special + type has the advantage of being easier to optimize for the compiler. 
+ +* [#8620](https://github.com/leanprover/lean4/pull/8620) removes the `NatCast (Fin n)` global instance (both the direct + instance, and the indirect one via `Lean.Grind.Semiring`), as that + instance causes `x < n` (for `x : Fin k`, `n : Nat`) to be + elaborated as `x < ↑n` rather than `↑x < n`, which is undesirable. Note + however that in Mathlib this happens anyway! + +* [#8629](https://github.com/leanprover/lean4/pull/8629) replaces special, more optimized `IteratorLoop` instances, for + which no lawfulness proof has been made, with the verified default + implementation. The specialization of the loop/collect implementations + is low priority, but having lawfulness instances for all iterators is + important for verification. + +* [#8631](https://github.com/leanprover/lean4/pull/8631) generalizes `Std.Sat.AIG.relabel(Nat)_unsat_iff` to allow the + AIG type to be empty. We generalize the proof, by showing that in the + case when `α` is empty, the environment doesn't matter, since all + environments `α → Bool` are isomorphic. + +* [#8640](https://github.com/leanprover/lean4/pull/8640) adds `BitVec.setWidth'_eq` to `bv_normalize` such that + `bv_decide` can reduce it and solve lemmas involving `setWidth'_eq` + +* [#8669](https://github.com/leanprover/lean4/pull/8669) makes `unsafeBaseIO` `noinline`. The new compiler is better at + optimizing `Result`-like types, which can cause the final operation in + an `unsafeBaseIO` block to be dropped, since `unsafeBaseIO` is + discarding the state. + +* [#8678](https://github.com/leanprover/lean4/pull/8678) makes the LHS of `isSome_finIdxOf?` and `isNone_finIdxOf?` more + general. + +* [#8703](https://github.com/leanprover/lean4/pull/8703) corrects the `IteratorLoop` instance in `DropWhile`, which + previously triggered for arbitrary iterator types. + +* [#8719](https://github.com/leanprover/lean4/pull/8719) adds grind annotations for + List/Array/Vector.eraseP/erase/eraseIdx. It also adds some missing + lemmas.
+ +* [#8721](https://github.com/leanprover/lean4/pull/8721) adds the types `Std.ExtDTreeMap`, `Std.ExtTreeMap` and + `Std.ExtTreeSet` of extensional tree maps and sets. These are very + similar in construction to the existing extensional hash maps with one + exception: extensional tree maps and sets provide all functions from + regular tree maps and sets. This is possible because in contrast to hash + maps, tree maps are always ordered. + +* [#8734](https://github.com/leanprover/lean4/pull/8734) adds the missing instance + ``` + instance decidableExistsFin (P : Fin n → Prop) [DecidablePred P] : Decidable (∃ i, P i) + ``` + +* [#8740](https://github.com/leanprover/lean4/pull/8740) introduces associativity rules and preservation of `(umul, smul, + uadd, sadd)Overflow`flags. + +* [#8741](https://github.com/leanprover/lean4/pull/8741) adds annotations for + `List/Array/Vector.find?/findSome?/idxOf?/findIdx?`. + +* [#8742](https://github.com/leanprover/lean4/pull/8742) fixes a bug where the single-quote character `Char.ofNat 39` + would delaborate as `'''`, which causes a parse error if pasted back in + to the source code. + +* [#8745](https://github.com/leanprover/lean4/pull/8745) adds a logic of stateful predicates `SPred` to `Std.Do` in order + to support reasoning about monadic programs. It comes with a dedicated + proof mode the tactics of which are accessible by importing + `Std.Tactic.Do`. + +* [#8747](https://github.com/leanprover/lean4/pull/8747) adds grind annotations for \`List/Array/Vector.finRange\` + theorems. + +* [#8748](https://github.com/leanprover/lean4/pull/8748) adds grind annotations for `Array/Vector.mapIdx` and `mapFinIdx` + theorems. + +* [#8749](https://github.com/leanprover/lean4/pull/8749) adds grind annotations for `List/Array/Vector.ofFn` theorems and + additional `List.Impl` find operations. + +* [#8750](https://github.com/leanprover/lean4/pull/8750) adds grind annotations for the + `List/Array/Vector.zipWith/zipWithAll/unzip` functions. 
+ +* [#8765](https://github.com/leanprover/lean4/pull/8765) adds grind annotations for `List.Perm`; involves a revision of + grind annotations for `List.countP/count` as well. + +* [#8768](https://github.com/leanprover/lean4/pull/8768) introduces a `ForIn'` instance and a `size` function for + iterators in a minimal fashion. The `ForIn'` instance is not marked as + an instance because it is unclear which `Membership` relation is + sufficiently useful. The `ForIn'` instance existing as a `def` and + inducing the `ForIn` instance, it becomes possible to provide more + specialized `ForIn'` instances, with nice `Membership` relations, for + various types of iterators. The `size` function has no lemmas yet. + +* [#8784](https://github.com/leanprover/lean4/pull/8784) introduces ranges that are polymorphic, in contrast to the + existing `Std.Range` which only supports natural numbers. + +* [#8805](https://github.com/leanprover/lean4/pull/8805) continues adding `grind` annotations for `List/Array/Vector` + lemmas. + +* [#8808](https://github.com/leanprover/lean4/pull/8808) adds the missing `le_of_add_left_le {n m k : Nat} (h : k + n ≤ + m) : n ≤ m` and `le_add_left_of_le {n m k : Nat} (h : n ≤ m) : n ≤ k + + m`. + +* [#8811](https://github.com/leanprover/lean4/pull/8811) adds theorems `BitVec.(toNat, toInt, + toFin)_shiftLeftZeroExtend`, completing the API for + `BitVec.shiftLeftZeroExtend`. + +* [#8826](https://github.com/leanprover/lean4/pull/8826) corrects the definition of `Lean.Grind.NatModule`, which wasn't + previously useful. + +* [#8827](https://github.com/leanprover/lean4/pull/8827) renames `BitVec.getLsb'` to `BitVec.getLsb`, now that older + deprecated definition occupying that name has been removed. (Similarly + for `BitVec.getMsb'`.) + +* [#8829](https://github.com/leanprover/lean4/pull/8829) avoids importing all of `BitVec.Lemmas` and `BitVec.BitBlast` + into `UInt.Lemmas`. (They are still imported into `SInt.Lemmas`; this + seems much harder to avoid.) 
+ +* [#8830](https://github.com/leanprover/lean4/pull/8830) rearranges files under `Init.Grind`, moving out instances for + concrete algebraic types in `Init.GrindInstances`. + +* [#8849](https://github.com/leanprover/lean4/pull/8849) adds `grind` annotations for `Sum`. + +* [#8850](https://github.com/leanprover/lean4/pull/8850) adds `grind` annotations for `Prod`. + +* [#8851](https://github.com/leanprover/lean4/pull/8851) adds grind annotations for `Function.curry`/`uncurry`. + +* [#8852](https://github.com/leanprover/lean4/pull/8852) adds grind annotations for `Nat.testBit` and bitwise operations + on `Nat`. + +* [#8853](https://github.com/leanprover/lean4/pull/8853) adds `grind` annotations relating `Nat.fold/foldRev/any/all` and + `Fin.foldl/foldr/foldlM/foldrM` to the corresponding operations on + `List.finRange`. + +* [#8877](https://github.com/leanprover/lean4/pull/8877) adds grind annotations for + `List/Array/Vector.attach/attachWith/pmap`. + +* [#8878](https://github.com/leanprover/lean4/pull/8878) adds grind annotations for List/Array/Vector monadic functions. + +* [#8886](https://github.com/leanprover/lean4/pull/8886) adds `IO.FS.Stream.readToEnd` which parallels + `IO.FS.Handle.readToEnd` along with its upstream definitions (i.e., + `readBinToEndInto` and `readBinToEnd`). It also removes an unnecessary + `partial` from `IO.FS.Handle.readBinToEnd`. + +* [#8887](https://github.com/leanprover/lean4/pull/8887) generalizes `IO.FS.lines` with `IO.FS.Handle.lines` and adds the + parallel `IO.FS.Stream.lines` for streams. + +* [#8897](https://github.com/leanprover/lean4/pull/8897) simplifies some `simp` calls. + +* [#8905](https://github.com/leanprover/lean4/pull/8905) uses the linter from + https://github.com/leanprover/lean4/pull/8901 to clean up simp + arguments. + +* [#8920](https://github.com/leanprover/lean4/pull/8920) uses the linter from #8901 to clean up more simp arguments, + completing #8905. 
+ +* [#8928](https://github.com/leanprover/lean4/pull/8928) adds a logic of stateful predicates SPred to Std.Do in order to + support reasoning about monadic programs. It comes with a dedicated + proof mode the tactics of which are accessible by importing + Std.Tactic.Do. + +* [#8941](https://github.com/leanprover/lean4/pull/8941) adds `BitVec.(getElem, getLsbD, getMsbD)_(smod, sdiv, srem)` + theorems to complete the API for `sdiv`, `srem`, `smod`. Even though the + rhs is not particularly succinct (it's hard to find a meaning for what it + means to have "the n-th bit of the result of a signed division/modulo + operation"), these lemmas prevent the need to `unfold` the operations. + +* [#8947](https://github.com/leanprover/lean4/pull/8947) introduces polymorphic slices in their most basic form. They + come with a notation similar to the new range notation. `Subarray` is + now also a slice and can produce an iterator now. It is intended to + migrate more operations of `Subarray` to the `Slice` wrapper type to + make them available for slices of other types, too. + +* [#8950](https://github.com/leanprover/lean4/pull/8950) adds `BitVec.toFin_(sdiv, smod, srem)` and `BitVec.toNat_srem`. + The strategy for the `rhs` of the `toFin_*` lemmas is to consider what + the corresponding `toNat_*` theorems do and push the `toFin` closer to + the operands. For the `rhs` of `BitVec.toNat_srem` I used the same + strategy as `BitVec.toNat_smod`. + +* [#8967](https://github.com/leanprover/lean4/pull/8967) both adds initial `@[grind]` annotations for `BitVec`, and uses + `grind` to remove many proofs from `BitVec/Lemmas`. + +* [#8974](https://github.com/leanprover/lean4/pull/8974) adds `BitVec.msb_(smod, srem)`. + +* [#8977](https://github.com/leanprover/lean4/pull/8977) adds a generic `MonadLiftT Id m` instance. We do not implement a + `MonadLift Id m` instance because it would slow down instance resolution + and because it would create more non-canonical instances.
This change + makes it possible to iterate over a pure iterator, such as `[1, 2, + 3].iter`, in arbitrary monads. + +* [#8992](https://github.com/leanprover/lean4/pull/8992) adds `PULift`, a more general form of `ULift` and `PLift` that + subsumes both. + +* [#8995](https://github.com/leanprover/lean4/pull/8995) introduces a Hoare logic for monadic programs in + `Std.Do.Triple`, and assorted tactics: + + * `mspec` for applying Hoare triple specifications + * `mvcgen` to turn a Hoare triple proof obligation `⦃P⦄ prog ⦃Q⦄` into + pure verification conditions (i.e., without any traces of Hoare triples + or weakest preconditions reminiscent of `prog`). The resulting + verification conditions in the stateful logic of `Std.Do.SPred` can be + discharged manually with the tactics coming with its custom proof mode + or with automation such as `simp` and `grind`. + +* [#9027](https://github.com/leanprover/lean4/pull/9027) provides an iterator combinator that lifts the emitted values + into a higher universe level via `ULift`. This combinator is then used + to make the subarray iterators universe-polymorphic. Previously, they + were only available for `Subarray α` if `α : Type`. + +* [#9030](https://github.com/leanprover/lean4/pull/9030) fixes a couple of bootstrapping-related hiccups in the newly + added `Std.Do` module. More precisely, + +* [#9038](https://github.com/leanprover/lean4/pull/9038) adds test cases for the VC generator and implements a few small + and tedious fixes to ensure they pass. + +* [#9049](https://github.com/leanprover/lean4/pull/9049) proves that the default `toList`, `toListRev` and `toArray` + functions on slices can be described in terms of the slice iterator. + Relying on new lemmas for the `uLift` and `attachWith` iterator + combinators, a more concrete description of said functions is given for + `Subarray`.
+ +* [#9054](https://github.com/leanprover/lean4/pull/9054) corrects some inconsistencies in `TreeMap`/`HashMap` grind + annotations, for `isSome_get?_eq_contains` and `empty_eq_emptyc`. + +* [#9055](https://github.com/leanprover/lean4/pull/9055) renames `Array/Vector.extract_push` to `extract_push_of_le`, and + replaces the lemma with one without a side condition. + +* [#9058](https://github.com/leanprover/lean4/pull/9058) provides a `ToStream` instance for slices so that they can be + used in `for i in xs, j in ys do` notation. + +* [#9075](https://github.com/leanprover/lean4/pull/9075) adds `BEq` instances for `ByteArray` and `FloatArray` (also a + `DecidableEq` instance for `ByteArray`). + +## Compiler + +* [#8594](https://github.com/leanprover/lean4/pull/8594) removes incorrect optimizations for strictOr/strictAnd from the + old compiler, along with deleting an incorrect test. In order to do + these optimizations correctly, nontermination analysis is required. + Arguably, the correct way to express these optimizations is by exposing + the implementation of strictOr/strictAnd to a nontermination-aware phase + of the compiler, and then having them follow from more general + transformations. + +* [#8595](https://github.com/leanprover/lean4/pull/8595) wraps the invocation of the new compiler in `withoutExporting`. + This is not necessary for the old compiler because it uses more direct + access to the kernel environment. + +* [#8602](https://github.com/leanprover/lean4/pull/8602) adds support to the new compiler for `Eq.recOn` (which is + supported by the old compiler but missing a test). + +* [#8604](https://github.com/leanprover/lean4/pull/8604) adds support for the `compiler.extract_closed` option to the new + compiler, since this is used by the definition of `unsafeBaseIO`. We'll + revisit this once we switch to the new compiler and rethink its + relationship with IO. 
+ +* [#8614](https://github.com/leanprover/lean4/pull/8614) implements constant folding for `toNat` in the new compiler, + which improves parity with the old compiler. + +* [#8616](https://github.com/leanprover/lean4/pull/8616) adds constant folding for `Nat.pow` to the new compiler, + following the same limits as the old compiler. + +* [#8618](https://github.com/leanprover/lean4/pull/8618) implements LCNF constant folding for `Nat.nextPowerOfTwo`. + +* [#8634](https://github.com/leanprover/lean4/pull/8634) makes `hasTrivialStructure?` return false for types whose + constructors have types that are erased, e.g. if they construct a + `Prop`. + +* [#8636](https://github.com/leanprover/lean4/pull/8636) adds a function called `lean_setup_libuv` that initializes + required LIBUV components. It needs to be outside of + `lean_initialize_runtime_module` because it requires `argv` and `argc` + to work correctly. + +* [#8647](https://github.com/leanprover/lean4/pull/8647) improves the precision of the new compiler's `noncomputable` + check for projections. There is no test included because while this was + reduced from Mathlib, the old compiler does not correctly handle the + reduced test case. It's not entirely clear to me if the check is passing + with the old compiler for correct reasons. A test will be added to the + new compiler's branch. + +* [#8675](https://github.com/leanprover/lean4/pull/8675) increases the precision of the new compiler's non computable + check, particularly around irrelevant uses of `noncomputable` defs in + applications. + +* [#8681](https://github.com/leanprover/lean4/pull/8681) adds an optimization to the LCNF simp pass where the + discriminant of a `cases` construct will only be marked as used if it has a + non-default alternative. + +* [#8683](https://github.com/leanprover/lean4/pull/8683) adds an optimization to the LCNF simp pass where the + discriminant of a single-alt cases is only marked as used if any param + is used.
+ +* [#8709](https://github.com/leanprover/lean4/pull/8709) handles constants with erased types in `toMonoType`. It is much + harder to write a test case for this than you would think, because most + references to such types get replaced with `lcErased` earlier. + +* [#8712](https://github.com/leanprover/lean4/pull/8712) optimizes let decls of an erased type to an erased value. + Specialization can create local functions that produce a Prop, and + there's no point in keeping them around. + +* [#8716](https://github.com/leanprover/lean4/pull/8716) makes any type application of an erased term to be erased. This + comes up a bit more than one would expect in the implementation of Lean + itself. + +* [#8717](https://github.com/leanprover/lean4/pull/8717) uses the fvar substitution mechanism to replace erased code. + This isn't entirely satisfactory, since LCNF's `.return` doesn't support + a general `Arg` (which has a `.erased` constructor), it only supports an + `FVarId`. This is in contrast to the IR `.ret`, which does support a + general `Arg`. + +* [#8729](https://github.com/leanprover/lean4/pull/8729) changes LCNF's `FVarSubst` to use `Arg` rather than `Expr`. This + enforces the requirements on substitutions, which match the requirements + on `Arg`. + +* [#8752](https://github.com/leanprover/lean4/pull/8752) fixes an issue where the `extendJoinPointContext` pass can lift + join points containing projections to the top level, as siblings of + `cases` constructs matching on other projections of the same base value. + This prevents the `structProjCases` pass from projecting both at once, + extending the lifetime of the parent value and breaking linearity at + runtime. + +* [#8754](https://github.com/leanprover/lean4/pull/8754) changes the implementation of computed fields in the new + compiler, which should enable more optimizations (and remove a + questionable hack in `toLCNF` that was only suitable for bringup). 
We + convert `casesOn` to `cases` like we do for other inductive types, all + constructors get replaced by their real implementations late in the base + phase, and then the `cases` expression is rewritten to use the real + constructors in `toMono`. + +* [#8758](https://github.com/leanprover/lean4/pull/8758) adds caching for the `hasTrivialStructure?` function for LCNF + types. This is one of the hottest small functions in the new compiler, + so adding a cache makes a lot of sense. + +* [#8764](https://github.com/leanprover/lean4/pull/8764) changes the LCNF pass pipeline so checks are no longer run by + default after every pass, only after `init`, `saveBase`, `toMono` and + `saveMono`. This is a compile time improvement, and the utility of these + checks is decreased a bit after the decision to no longer attempt to + preserve types throughout compilation. They have not been a significant + way to discover issues during development of the new compiler. + +* [#8802](https://github.com/leanprover/lean4/pull/8802) fixes a bug in `floatLetIn` where if one decl (e.g. a join + point) is floated into a case arm and it uses another decl (e.g. another + join point) that does not have any other existing uses in that arm, then + the second decl does not get floated in despite this being perfectly + legal. This was causing artificial array linearity issues in + `Lean.Elab.Tactic.BVDecide.LRAT.trim.useAnalysis`. + +* [#8816](https://github.com/leanprover/lean4/pull/8816) adds constant folding for Char.ofNat in LCNF simp. This + implicitly relies on the representation of `Char` as `UInt32` rather + than making a separate `.char` literal type, which seems reasonable as + `Char` is erased by the trivial structure optimization in `toMono`. + +* [#8822](https://github.com/leanprover/lean4/pull/8822) adds a cache for constructor info in toIR. This is called for + all constructors, projections, and cases alternatives, so it makes sense + to cache. 
+ +* [#8825](https://github.com/leanprover/lean4/pull/8825) improves IR generation for constructors of inductive types that + are represented by scalars. Surprisingly, this isn't required for + correctness, because the boxing pass will fix it up. The extra `unbox` + operation it inserts shouldn't matter when compiling to native code, + because it's trivial for a C compiler to optimize, but it does matter + for the interpreter. + +* [#8831](https://github.com/leanprover/lean4/pull/8831) caches the result of `lowerEnumToScalarType`, which is used + heavily in LCNF to IR conversion. + +* [#8885](https://github.com/leanprover/lean4/pull/8885) removes an old workaround around non-implemented C++11 features + in the thread finalization. + +* [#8923](https://github.com/leanprover/lean4/pull/8923) implements `casesOn` for `Thunk` and `Task`. Since these are + builtin types, this needs to be special-cased in `toMono`. + +* [#8952](https://github.com/leanprover/lean4/pull/8952) fixes the handling of the `never_extract` attribute in the + compiler's CSE pass. There is an interesting debate to be had about + exactly how hard the compiler should try to avoid duplicating anything + that transitively uses `never_extract`, but this is the simplest form + and roughly matches the check in the old compiler (although due to + different handling of local function decls in the two compilers, the + consequences might be slightly different). + +* [#8956](https://github.com/leanprover/lean4/pull/8956) changes `toLCNF` to stop caching translations of expressions + upon seeing an expression marked `never_extract`. This is more + coarse-grained than it needs to be, but it is difficult to do any + better, as the new compiler's `Expr` cache is based on structural + identity (rather than the pointer identity of the old compiler). + +* [#9003](https://github.com/leanprover/lean4/pull/9003) implements the validity check for the type of `main` in the new + compiler. 
There were no tests for this, so it slipped under the radar. + +## Pretty Printing + +* [#7954](https://github.com/leanprover/lean4/pull/7954) improves `pp.oneline`, where it now preserves tags when + truncating formatted syntax to a single line. Note that the `[...]` + continuation does not yet have any functionality to enable seeing the + untruncated syntax. Closes #3681. + +* [#8617](https://github.com/leanprover/lean4/pull/8617) fixes (1) an issue where private names are not unresolved when + they are pretty printed, (2) an issue where in `pp.universes` mode names + were allowed to shadow local names, (3) an issue where in `match` + patterns constants shadowing locals wouldn't use `_root_`, and (4) an + issue where tactics might have an incorrect "try this" when + `pp.fullNames` is set. Adds more delaboration tests for name + unresolution. + +* [#8626](https://github.com/leanprover/lean4/pull/8626) closes #3791, making sure that the Syntax formatter inserts + whitespace before and after comments in the leading and trailing text of + Syntax to avoid having comments comment out any following syntax, and to + avoid comments' lexical syntax from being interpreted as being part of + another syntax. If the text contains newlines before or after any + comments, they are formatted as hard newlines rather than soft newlines. + For example, `--` comments will have a hard newline after them. Note: + metaprograms generating Syntax with comments should be sure to include + newlines at the ends of `--` comments. + +## Documentation + +* [#8934](https://github.com/leanprover/lean4/pull/8934) adds explanations for a few errors concerning noncomputability, + redundant match alternatives, and invalid inductive declarations. + +* [#8990](https://github.com/leanprover/lean4/pull/8990) adds missing doc-strings for grind's internal algebra + typeclasses, for inclusion in the reference manual. 
+ +* [#8998](https://github.com/leanprover/lean4/pull/8998) makes the docstrings related to `Format` and `Repr` have + consistent formatting and style, and adds missing docstrings. + +## Server + +* [#8105](https://github.com/leanprover/lean4/pull/8105) adds support for server-sided `RpcRef` reuse and fixes a bug + where trace nodes in the InfoView would close while the file was still + being processed. + +* [#8511](https://github.com/leanprover/lean4/pull/8511) implements signature help support. When typing a function + application, editors with support for signature help will now display a + popup that designates the current (remaining) function type. This + removes the need to remember the function signature while typing the + function application, or having to constantly cycle between hovering + over the function identifier and typing the application. In VS Code, the + signature help can be triggered manually using `Ctrl+Shift+Space`. + +* [#8654](https://github.com/leanprover/lean4/pull/8654) adds server-side support for a new module hierarchy component in + VS Code that can be used to navigate both the import tree of a module + and the imported-by tree of a module. Specifically, it implements new + requests `$/lean/prepareModuleHierarchy`, + `$/lean/moduleHierarchy/imports` and + `$/lean/moduleHierarchy/importedBy`. These requests are not supported by + standard LSP. Companion PR at + [leanprover/vscode-lean4#620](https://github.com/leanprover/vscode-lean4/pull/620). + +* [#8699](https://github.com/leanprover/lean4/pull/8699) adds support to the server for the new module setup process by + changing how `lake setup-file` is used. + +* [#8868](https://github.com/leanprover/lean4/pull/8868) ensures that code actions do not have to wait for the full file + to elaborate. This regression was accidentally introduced in #7665. 
+ +* [#9019](https://github.com/leanprover/lean4/pull/9019) fixes a bug where semantic highlighting would only highlight + keywords that started with an alphanumeric character. Now, it uses + `Lean.isIdFirst`. + +## Lake + +* [#7738](https://github.com/leanprover/lean4/pull/7738) makes memoization of built-in facets toggleable through a + `memoize` option on the facet configuration. Built-in facets which are + essentially aliases (e.g., `default`, `o`) have had memoization + disabled. + +* [#8447](https://github.com/leanprover/lean4/pull/8447) makes use of `lean --setup` in Lake builds of Lean modules and + adds Lake support for the new `.olean` artifacts produced by the module + system. + +* [#8613](https://github.com/leanprover/lean4/pull/8613) changes the Lake version syntax (to `5.0.0-src+`) to + ensure it is a well-formed SemVer, + +* [#8656](https://github.com/leanprover/lean4/pull/8656) enables auto-implicits in the Lake math template. This resolves + an issue where new users sometimes set up a new project for math + formalization and then quickly realize that none of the code samples in + our official books and docs that use auto-implicits work in their + projects. With the introduction of [inlay hints for + auto-implicits](https://github.com/leanprover/lean4/pull/6768), we + consider the auto-implicit UX to be sufficiently usable that they can be + enabled by default in the math template. + Notably, this change does not affect Mathlib itself, which will proceed + to disable auto-implicits. + +* [#8701](https://github.com/leanprover/lean4/pull/8701) exports `LeanOption` in the `Lean` namespace from the `Lake` + namespace. `LeanOption` was moved from `Lean` to `Lake` in #8447, which + can cause unnecessary breakage without this. + +* [#8736](https://github.com/leanprover/lean4/pull/8736) partially reverts #8024 which introduced a significant Lake + performance regression during builds. 
Once the cause is discovered and + fixed, a similar PR will be made to revert this. + +* [#8846](https://github.com/leanprover/lean4/pull/8846) reintroduces the basics of `lean --setup` integration into Lake + without the module computation which is still undergoing performance + debugging in #8787. + +* [#8866](https://github.com/leanprover/lean4/pull/8866) upgrades the `math` template for `lake init` and `lake new` to + configure the new project to meet rigorous Mathlib maintenance + standards. In comparison with the previous version (now available as + `lake new ... math-lax`), this automatically provides: + + * Strict linting options matching Mathlib. + * GitHub workflow for automatic upgrades to newer Lean and Mathlib + releases. + * Automatic release tagging for toolchain upgrades. + * API documentation generated by + [doc-gen4](https://github.com/leanprover/doc-gen4) and hosted on + `github.io`. + * README with some GitHub-specific instructions. + +* [#8922](https://github.com/leanprover/lean4/pull/8922) introduces a local artifact cache for Lake. When enabled, Lake + will share build artifacts (built files) across different instances of + the same package using an input- and content-addressed cache. + +* [#8981](https://github.com/leanprover/lean4/pull/8981) removes Lake's usage of `lean -R` and `moduleNameOfFileName` to + pass module names to Lean. For workspace names, it now relies on + directly passing the module name through `lean --setup`. For + non-workspace modules passed to `lake lean` or `lake setup-file`, it + uses a fixed module name of `_unknown`. + +* [#9068](https://github.com/leanprover/lean4/pull/9068) fixes some bugs with the local Lake artifact cache and cleans up + the surrounding API. It also adds the ability to opt-in to the cache on + packages without `enableArtifactCache` set using the + `LAKE_ARTIFACT_CACHE` environment variable. 
+ +* [#9081](https://github.com/leanprover/lean4/pull/9081) fixes a bug with Lake where the job monitor would sit on a + top-level build (e.g., `mathlib/Mathlib:default`) instead of reporting + module build progress. + +* [#9101](https://github.com/leanprover/lean4/pull/9101) fixes a bug introduced by #9081 where the source file was dropped + from the module input trace and some entries were dropped from the + module job log. + +## Other + +* [#8702](https://github.com/leanprover/lean4/pull/8702) enhances the PR release workflow to create both short format and + SHA-suffixed release tags. Creates both pr-release-{PR_NUMBER} and + pr-release-{PR_NUMBER}-{SHORT_SHA} tags, generates separate releases for + both formats, adds separate GitHub status checks, and updates + Batteries/Mathlib testing branches to use SHA-suffixed tags for exact + commit traceability. + +* [#8710](https://github.com/leanprover/lean4/pull/8710) pins the precise hash of softprops/action-gh-release to + +* [#9033](https://github.com/leanprover/lean4/pull/9033) adds a Mathlib-like testing and feedback system for the + reference manual. Lean PRs will receive comments that reflect the status + of the language reference with respect to the PR. + +* [#9092](https://github.com/leanprover/lean4/pull/9092) further updates release automation. The per-repository update + script `script/release_steps.py` now actually performs the tests, + rather than outputting a script for the release manager to run line by + line. It's been tested on `v4.21.0` (i.e. the easy case of a stable + release), and we'll debug its behaviour on `v4.22.0-rc1` tonight. 
+ + +```` diff --git a/Manual/Releases/v4.2.0.lean b/Manual/Releases/v4_2_0.lean similarity index 99% rename from Manual/Releases/v4.2.0.lean rename to Manual/Releases/v4_2_0.lean index 692ca20c..b1f58128 100644 --- a/Manual/Releases/v4.2.0.lean +++ b/Manual/Releases/v4_2_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.2.0" file := "v4.2.0" %%% -`````markdown +```markdown * [isDefEq cache for terms not containing metavariables.](https://github.com/leanprover/lean4/pull/2644). * Make [`Environment.mk`](https://github.com/leanprover/lean4/pull/2604) and [`Environment.add`](https://github.com/leanprover/lean4/pull/2642) private, and add [`replay`](https://github.com/leanprover/lean4/pull/2617) as a safer alternative. * `IO.Process.output` no longer inherits the standard input of the caller. @@ -33,4 +33,4 @@ file := "v4.2.0" * This is accomplished via changes to `withCollectingNewGoalsFrom`, which also affects `elabTermWithHoles`, `refine'`, `calc` (tactic), and `specialize`. Likewise, all of these now only include newly-created metavariables in their output. * Previously, both newly-created and pre-existing metavariables occurring in `e` were returned inconsistently in different edge cases, causing duplicated goals in the infoview (issue [#2495](https://github.com/leanprover/lean4/issues/2495)), erroneously closed goals (issue [#2434](https://github.com/leanprover/lean4/issues/2434)), and unintuitive behavior due to `refine e` capturing previously-created goals appearing unexpectedly in `e` (no issue; see PR). -````` +``` diff --git a/Manual/Releases/v4.3.0.lean b/Manual/Releases/v4_3_0.lean similarity index 99% rename from Manual/Releases/v4.3.0.lean rename to Manual/Releases/v4_3_0.lean index 7bbdb0f8..3f411f99 100644 --- a/Manual/Releases/v4.3.0.lean +++ b/Manual/Releases/v4_3_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.3.0" file := "v4.3.0" %%% -`````markdown +```markdown * `simp [f]` does not unfold partial applications of `f` anymore. 
See issue [#2042](https://github.com/leanprover/lean4/issues/2042). To fix proofs affected by this change, use `unfold f` or `simp (config := { unfoldPartialApp := true }) [f]`. * By default, `simp` will no longer try to use Decidable instances to rewrite terms. In particular, not all decidable goals will be closed by `simp`, and the `decide` tactic may be useful in such cases. The `decide` simp configuration option can be used to locally restore the old `simp` behavior, as in `simp (config := {decide := true})`; this includes using Decidable instances to verify side goals such as numeric inequalities. @@ -54,4 +54,4 @@ file := "v4.3.0" * Deprecate the `manifestFile` field of a package configuration. * There is now a more rigorous check on `lakefile.olean` compatibility (see [#2842](https://github.com/leanprover/lean4/pull/2842) for more details). -````` +``` diff --git a/Manual/Releases/v4.4.0.lean b/Manual/Releases/v4_4_0.lean similarity index 99% rename from Manual/Releases/v4.4.0.lean rename to Manual/Releases/v4_4_0.lean index 9e87c3e5..218ab4c9 100644 --- a/Manual/Releases/v4.4.0.lean +++ b/Manual/Releases/v4_4_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.4.0" file := "v4.4.0" %%% -`````markdown +````markdown * Lake and the language server now support per-package server options using the `moreServerOptions` config field, as well as options that apply to both the language server and `lean` using the `leanOptions` config field. Setting either of these fields instead of `moreServerArgs` ensures that viewing files from a dependency uses the options for that dependency. Additionally, `moreServerArgs` is being deprecated in favor of the `moreGlobalServerArgs` field. See PR [#2858](https://github.com/leanprover/lean4/pull/2858). 
A Lakefile with the following deprecated package declaration: @@ -62,4 +62,4 @@ Bug fixes for [#2628](https://github.com/leanprover/lean4/issues/2628), [#2883]( * Later packages and libraries in the dependency tree are now preferred over earlier ones. That is, the later ones "shadow" the earlier ones. Such an ordering is more consistent with how declarations generally work in programming languages. This will break any package that relied on the previous ordering. See issue [#2548](https://github.com/leanprover/lean4/issues/2548) and PR [#2937](https://github.com/leanprover/lean4/pull/2937). * Executable roots are no longer mistakenly treated as importable. They will no longer be picked up by `findModule?`. See PR [#2937](https://github.com/leanprover/lean4/pull/2937). -````` +```` diff --git a/Manual/Releases/v4.5.0.lean b/Manual/Releases/v4_5_0.lean similarity index 99% rename from Manual/Releases/v4.5.0.lean rename to Manual/Releases/v4_5_0.lean index 24796d4c..78bde223 100644 --- a/Manual/Releases/v4.5.0.lean +++ b/Manual/Releases/v4_5_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.5.0" file := "v4.5.0" %%% -`````markdown +````markdown * Modify the lexical syntax of string literals to have string gaps, which are escape sequences of the form `"\" newline whitespace*`. These have the interpretation of an empty string and allow a string to flow across multiple lines without introducing additional whitespace. The following is equivalent to `"this is a string"`. @@ -94,4 +94,4 @@ Improve [short-circuiting behavior](https://github.com/leanprover/lean4/pull/297 Several Lake bug fixes: [#3036](https://github.com/leanprover/lean4/issues/3036), [#3064](https://github.com/leanprover/lean4/issues/3064), [#3069](https://github.com/leanprover/lean4/issues/3069). 
-````` +```` diff --git a/Manual/Releases/v4.6.0.lean b/Manual/Releases/v4_6_0.lean similarity index 99% rename from Manual/Releases/v4.6.0.lean rename to Manual/Releases/v4_6_0.lean index 99dec639..45cca7aa 100644 --- a/Manual/Releases/v4.6.0.lean +++ b/Manual/Releases/v4_6_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.6.0" file := "v4.6.0" %%% -`````markdown +````markdown * Add custom simplification procedures (aka `simproc`s) to `simp`. Simprocs can be triggered by the simplifier on a specified term-pattern. Here is an small example: ```lean import Lean.Meta.Tactic.Simp.BuiltinSimprocs.Nat @@ -246,4 +246,4 @@ Other improvements: * fix `deriving` only deriving the first declaration for some handlers [#3058](https://github.com/leanprover/lean4/pull/3058), fixing [#3057](https://github.com/leanprover/lean4/issues/3057) * do not instantiate metavariables in kabstract/rw for disallowed occurrences [#2539](https://github.com/leanprover/lean4/pull/2539), fixing [#2538](https://github.com/leanprover/lean4/issues/2538) * hover info for `cases h : ...` [#3084](https://github.com/leanprover/lean4/pull/3084) -````` +```` diff --git a/Manual/Releases/v4.7.0.lean b/Manual/Releases/v4_7_0.lean similarity index 99% rename from Manual/Releases/v4.7.0.lean rename to Manual/Releases/v4_7_0.lean index 3ced7c31..277b991e 100644 --- a/Manual/Releases/v4.7.0.lean +++ b/Manual/Releases/v4_7_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.7.0" file := "v4.7.0" %%% -`````markdown +````markdown * `simp` and `rw` now use instance arguments found by unification, rather than always resynthesizing. For backwards compatibility, the original behaviour is available via `set_option tactic.skipAssignedInstances false`. 
@@ -202,4 +202,4 @@ Other improvements: Lake fixes: * Warn on fetch cloud release failure [#3401](https://github.com/leanprover/lean4/pull/3401) * Cloud release trace & `lake build :release` errors [#3248](https://github.com/leanprover/lean4/pull/3248) -````` +```` diff --git a/Manual/Releases/v4.8.0.lean b/Manual/Releases/v4_8_0.lean similarity index 99% rename from Manual/Releases/v4.8.0.lean rename to Manual/Releases/v4_8_0.lean index e009d8da..91038453 100644 --- a/Manual/Releases/v4.8.0.lean +++ b/Manual/Releases/v4_8_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.8.0" file := "v4.8.0" %%% -`````markdown +````markdown ### Language features, tactics, and metaprograms * **Functional induction principles.** @@ -511,4 +511,4 @@ fact.eq_def : * `Option.toMonad` has been renamed to `Option.getM` and the unneeded `[Monad m]` instance argument has been removed. -````` +```` diff --git a/Manual/Releases/v4.9.0.lean b/Manual/Releases/v4_9_0.lean similarity index 99% rename from Manual/Releases/v4.9.0.lean rename to Manual/Releases/v4_9_0.lean index ff70fd05..0f3508bd 100644 --- a/Manual/Releases/v4.9.0.lean +++ b/Manual/Releases/v4_9_0.lean @@ -18,7 +18,7 @@ tag := "release-v4.9.0" file := "v4.9.0" %%% -`````markdown +````markdown ### Language features, tactics, and metaprograms * **Definition transparency** @@ -339,4 +339,4 @@ While most changes could be considered to be a breaking change, this section mak * The `MessageData.ofFormat` constructor has been turned into a function. If you need to inspect `MessageData`, you can pattern-match on `MessageData.ofFormatWithInfos`. -````` +```` diff --git a/Manual/Runtime.lean b/Manual/Runtime.lean index d1be5c0c..f8d7464e 100644 --- a/Manual/Runtime.lean +++ b/Manual/Runtime.lean @@ -46,6 +46,24 @@ These services include: There are many primitive operators. They are described in their respective sections under {ref "basic-types"}[Basic Types]. 
+# Boxing +%%% +tag := "boxing" +%%% + +:::paragraph +Lean values may be represented at runtime in two ways: +* {deftech}_Boxed_ values may be pointers to heap values or require shifting and masking. +* {deftech}_Unboxed_ values are immediately available. +::: + +Boxed values are either a pointer to an object, in which case the lowest-order bit is 0, or an immediate value, in which case the lowest-order bit is 1 and the value is found by shifting the representation to the right by one bit. + +Types with an unboxed representation, such as {name}`UInt8` and {tech}[enum inductive] types, are represented as the corresponding C types in contexts where the compiler can be sure that the value has said type. +In some contexts, such as generic container types like {name}`Array`, otherwise-unboxed values must be boxed prior to storage. +In other words, {name}`Bool.not` is called with and returns unboxed `uint8_t` values because the {tech}[enum inductive] type {name}`Bool` has an unboxed representation, but the individual {name}`Bool` values in an {lean}`Array Bool` are boxed. +A field of type {lean}`Bool` in an inductive type's constructor is represented unboxed, while {lean}`Bool`s stored in polymorphic fields that are instantiated as {lean}`Bool` are boxed. + # Reference Counting %%% @@ -204,30 +222,30 @@ def process' (str : String) : String × String:= The IR for {lean}`process` includes no `inc` or `dec` instructions. 
If the incoming string `x_1` is a unique reference, then it is still a unique reference when passed to {name}`String.set`, which can then use in-place modification: ```leanOutput p1 (allowDiff := 5) -[result] -def process._closed_1 : obj := - let x_1 : obj := ""; - ret x_1 -def process (x_1 : obj) : obj := - let x_2 : u32 := 32; - let x_3 : obj := 0; - let x_4 : obj := String.set x_1 x_3 x_2; - let x_5 : obj := process._closed_1; - let x_6 : obj := ctor_0[Prod.mk] x_4 x_5; - ret x_6 +[Compiler.IR] [result] + def process._closed_0 : obj := + let x_1 : obj := ""; + ret x_1 + def process (x_1 : obj) : obj := + let x_2 : obj := 0; + let x_3 : u32 := 32; + let x_4 : obj := String.set x_1 x_2 x_3; + let x_5 : obj := process._closed_0; + let x_6 : obj := ctor_0[Prod.mk] x_4 x_5; + ret x_6 ``` The IR for {lean}`process'`, on the other hand, increments the reference count of the string just before calling {name}`String.set`. -Thus, the modified string `x_4` is a copy, regardless of whether the reference to `x_1` is unique: +Thus, the modified string `x_4` is a copy, regardless of whether the original reference to `x_1` is unique: ```leanOutput p2 -[result] -def process' (x_1 : obj) : obj := - let x_2 : u32 := 32; - let x_3 : obj := 0; - inc x_1; - let x_4 : obj := String.set x_1 x_3 x_2; - let x_5 : obj := ctor_0[Prod.mk] x_4 x_1; - ret x_5 +[Compiler.IR] [result] + def process' (x_1 : obj) : obj := + let x_2 : obj := 0; + let x_3 : u32 := 32; + inc x_1; + let x_4 : obj := String.set x_1 x_2 x_3; + let x_5 : obj := ctor_0[Prod.mk] x_4 x_1; + ret x_5 ``` ::: @@ -246,35 +264,35 @@ def discardElems : List α → List Unit This emits the following IR: ```leanOutput discardElems -[result] -def discardElems._rarg (x_1 : obj) : obj := - case x_1 : obj of - List.nil → - let x_2 : obj := ctor_0[List.nil]; - ret x_2 - List.cons → - let x_3 : u8 := isShared x_1; - case x_3 : u8 of - Bool.false → - let x_4 : obj := proj[1] x_1; - let x_5 : obj := proj[0] x_1; - dec x_5; - let x_6 : obj := 
discardElems._rarg x_4; - let x_7 : obj := ctor_0[PUnit.unit]; - set x_1[1] := x_6; - set x_1[0] := x_7; - ret x_1 - Bool.true → - let x_8 : obj := proj[1] x_1; - inc x_8; - dec x_1; - let x_9 : obj := discardElems._rarg x_8; - let x_10 : obj := ctor_0[PUnit.unit]; - let x_11 : obj := ctor_1[List.cons] x_10 x_9; - ret x_11 -def discardElems (x_1 : ◾) : obj := - let x_2 : obj := pap discardElems._rarg; - ret x_2 +[Compiler.IR] [result] + def discardElems._redArg (x_1 : obj) : obj := + case x_1 : obj of + List.nil → + let x_2 : obj := ctor_0[List.nil]; + ret x_2 + List.cons → + let x_3 : u8 := isShared x_1; + case x_3 : u8 of + Bool.false → + let x_4 : obj := proj[1] x_1; + let x_5 : obj := proj[0] x_1; + dec x_5; + let x_6 : obj := ctor_0[PUnit.unit]; + let x_7 : obj := discardElems._redArg x_4; + set x_1[1] := x_7; + set x_1[0] := x_6; + ret x_1 + Bool.true → + let x_8 : obj := proj[1] x_1; + inc x_8; + dec x_1; + let x_9 : obj := ctor_0[PUnit.unit]; + let x_10 : obj := discardElems._redArg x_8; + let x_11 : obj := ctor_1[List.cons] x_9 x_10; + ret x_11 + def discardElems (x_1 : ◾) (x_2 : obj) : obj := + let x_3 : obj := discardElems._redArg x_2; + ret x_3 ``` In the IR, the {name}`List.cons` case explicitly checks whether the argument value is shared (i.e. whether it's reference count is greater than one). @@ -310,7 +328,7 @@ tag := "ffi" %%% -**The current interface was designed for internal use in Lean and should be considered unstable**. +*The current interface was designed for internal use in Lean and should be considered unstable*. It will be refined and extended in the future. Lean offers efficient interoperability with any language that supports the C ABI. @@ -385,15 +403,15 @@ local macro "..." 
: term => ``(«...») In the {tech key:="application binary interface"}[ABI], Lean types are translated to C types as follows: * The integer types {lean}`UInt8`, …, {lean}`UInt64`, {lean}`USize` are represented by the C types {c}`uint8_t`, ..., {c}`uint64_t`, {c}`size_t`, respectively. - If their {ref "fixed-int-runtime"}[run-time representation] requires boxing, then they are unboxed at the FFI boundary. + If their {ref "fixed-int-runtime"}[run-time representation] requires {tech key:="boxed"}[boxing], then they are unboxed at the FFI boundary. * {lean}`Char` is represented by {c}`uint32_t`. * {lean}`Float` is represented by {c}`double`. * {name}`Nat` and {name}`Int` are represented by {c}`lean_object *`. - Their runtime values is either a pointer to an opaque bignum object or, if the lowest bit of the "pointer" is 1 ({c}`lean_is_scalar`), an encoded unboxed natural number or integer ({c}`lean_box`/{c}`lean_unbox`). + Their runtime values is either a pointer to an opaque bignum object or, if the lowest bit of the "pointer" is 1 ({c}`lean_is_scalar`), an encoded natural number or integer ({c}`lean_box`/{c}`lean_unbox`). * A universe {lean}`Sort u`, type constructor {lean}`... → Sort u`, or proposition {lean}`p`​` :`{lean}` Prop` is {tech}[irrelevant] and is either statically erased (see above) or represented as a {c}`lean_object *` with the runtime value {c}`lean_box(0)` * The ABI for other inductive types that don't have special compiler support depends on the specifics of the type. It is the same as the {ref "run-time-inductives"}[run-time representation] of these types. - Its runtime value is a pointer to an object of a subtype of {c}`lean_object` (see the "Inductive types" section below) or the unboxed value {c}`lean_box(cidx)` for the {c}`cidx`th constructor of an inductive type if this constructor does not have any relevant parameters. 
+ Its runtime value is either a pointer to an object of a subtype of {c}`lean_object` (see the "Inductive types" section below) or it is the value {c}`lean_box(cidx)` for the {c}`cidx`th constructor of an inductive type if this constructor does not have any relevant parameters. ```lean (show := false) variable (u : Unit) diff --git a/Manual/Simp.lean b/Manual/Simp.lean index c208a506..24507131 100644 --- a/Manual/Simp.lean +++ b/Manual/Simp.lean @@ -76,7 +76,7 @@ simp $_:optConfig $[only]? $[ [ $[$e],* ] ]? $[at $[$h]*]? ::: In other words, an invocation of a simplification tactic takes the following modifiers, in order, all of which are optional: - * A {ref "tactic-config"}[configuration options], which should include the fields of {name}`Lean.Meta.Simp.Config` or {name}`Lean.Meta.DSimp.Config`, depending on whether the simplifier being invoked is a version of {tactic}`simp` or a version of {tactic}`dsimp`. + * A set of {ref "tactic-config"}[configuration options], which should include the fields of {name}`Lean.Meta.Simp.Config` or {name}`Lean.Meta.DSimp.Config`, depending on whether the simplifier being invoked is a version of {tactic}`simp` or a version of {tactic}`dsimp`. * The {keywordOf Lean.Parser.Tactic.simp}`only` modifier excludes the default simp set, instead beginning with an empty{margin}[Technically, the simp set always includes {name}`eq_self` and {name}`iff_self` in order to discharge reflexive cases.] simp set. * The lemma list adds or removes lemmas from the simp set. 
There are three ways to specify lemmas in the lemma list: * `*`, which adds all assumptions in the proof state to the simp set diff --git a/Manual/SourceFiles.lean b/Manual/SourceFiles.lean index aef6ed29..0476ae14 100644 --- a/Manual/SourceFiles.lean +++ b/Manual/SourceFiles.lean @@ -94,11 +94,11 @@ tag := "keywords-and-identifiers" An {tech}[identifier] consists of one or more identifier components, separated by `'.'`.{index}[identifier] {deftech}[Identifier components] consist of a letter or letter-like character or an underscore (`'_'`), followed by zero or more identifier continuation characters. -Letters are English letters, upper- or lowercase, and the letter-like characters include a range of non-English alphabetic scripts, including the Greek script which is widely used in Lean, as well as the members of the Unicode letter-like symbol block, which contains a number of double-struck characters (including `ℕ` and `ℤ`) and abbreviations. +Letters are English letters, upper- or lowercase, and the letter-like characters include a range of non-English alphabetic scripts, including the Greek script which is widely used in Lean, the Coptic script, the members of the Unicode letter-like symbol block, which contains a number of double-struck characters (including `ℕ` and `ℤ`) and abbreviations, the Latin-1 supplemental letters (with the exception of `×` and `÷`), and the Latin Extended-A block. Identifier continuation characters consist of letters, letter-like characters, underscores (`'_'`), exclamation marks (`!`), question marks (`?`), subscripts, and single quotes (`'`). As an exception, underscore alone is not a valid identifier. 
-````lean (show := false) +```lean (show := false) def validIdentifier (str : String) : IO String := Lean.Parser.identFn.test str @@ -130,30 +130,32 @@ def validIdentifier (str : String) : IO String := #check_msgs in #eval validIdentifier "αποδεικνύοντας" - -/- Here's some things that probably should be identifiers but aren't at the time of writing -/ - /-- info: "Success! Final stack:\n `κύκ\nRemaining:\n\"λος\"" -/ #check_msgs in #eval validIdentifier "κύκλος" -/-- info: "Failure @0 (⟨1, 0⟩): expected token\nFinal stack:\n \nRemaining: \"øvelse\"" -/ +/-- info: "Success! Final stack:\n `øvelse\nAll input consumed." -/ #check_msgs in #eval validIdentifier "øvelse" -/-- -info: "Failure @0 (⟨1, 0⟩): expected token\nFinal stack:\n \nRemaining: \"Übersetzung\"" --/ +/-- info: "Success! Final stack:\n `Übersetzung\nAll input consumed." -/ #check_msgs in #eval validIdentifier "Übersetzung" +/- Here's some things that probably should be identifiers but aren't at the time of writing -/ + /-- info: "Failure @0 (⟨1, 0⟩): expected token\nFinal stack:\n \nRemaining: \"переклад\"" -/ #check_msgs in #eval validIdentifier "переклад" -```` +/-- info: "Failure @0 (⟨1, 0⟩): expected token\nFinal stack:\n \nRemaining: \"汉语\"" -/ +#check_msgs in +#eval validIdentifier "汉语" + + +``` Identifiers components may also be surrounded by double {deftech}[guillemets] (`'«'` and `'»'`). Such identifier components may contain any character at all aside from `'»'`, even `'«'`, `'.'`, and newlines. diff --git a/Manual/Tactics.lean b/Manual/Tactics.lean index 92434ecd..ca37e22d 100644 --- a/Manual/Tactics.lean +++ b/Manual/Tactics.lean @@ -272,7 +272,7 @@ This can be used to refer to local lemmas by their theorem statement rather than ::::example "Assumptions by Type" :::keepEnv -```lean +```lean (show := false) variable (n : Nat) ``` In the following proof, {tactic}`cases` is repeatedly used to analyze a number. 
@@ -863,22 +863,22 @@ Generally speaking, {tactic}`have` should be used when proving an intermediate l :::tactic "have" ::: -:::tactic Lean.Parser.Tactic.tacticHaveI_ +:::tactic Lean.Parser.Tactic.tacticHave__ ::: -:::tactic Lean.Parser.Tactic.tacticHave'_ +:::tactic Lean.Parser.Tactic.tacticHave' ::: -:::tactic Lean.Parser.Tactic.tacticLet_ show:="let" +:::tactic Lean.Parser.Tactic.tacticLet__ show:="let" ::: :::tactic Lean.Parser.Tactic.letrec show:="let rec" ::: -:::tactic Lean.Parser.Tactic.tacticLetI_ +:::tactic Lean.Parser.Tactic.tacticLetI__ ::: -:::tactic Lean.Parser.Tactic.tacticLet'_ +:::tactic Lean.Parser.Tactic.tacticLet'__ ::: ## Configuration diff --git a/Manual/Tactics/Custom.lean b/Manual/Tactics/Custom.lean index dd082728..38b7df29 100644 --- a/Manual/Tactics/Custom.lean +++ b/Manual/Tactics/Custom.lean @@ -132,120 +132,3 @@ Multiple {keywordOf Lean.Parser.Command.macro_rules}`macro_rules` declarations a Backtracking is at the granularity of {keywordOf Lean.Parser.Command.macro_rules}`macro_rules` declarations, not their individual cases. 
::: :::: - - -# The Tactic Monad -%%% -tag := "tactic-monad" -draft := true -%%% - -::: planned 67 - * Relationship to {name}`Lean.Elab.Term.TermElabM`, {name}`Lean.Meta.MetaM` - * Overview of available effects - * Checkpointing -::: - -{docstring Lean.Elab.Tactic.Tactic} - -{docstring Lean.Elab.Tactic.TacticM} - -{docstring Lean.Elab.Tactic.run} - -{docstring Lean.Elab.Tactic.runTermElab} - -## Control - -{docstring Lean.Elab.Tactic.tryTactic} - -{docstring Lean.Elab.Tactic.tryTactic?} - -## Expressions - -{docstring Lean.Elab.Tactic.ensureHasNoMVars} - -{docstring Lean.Elab.Tactic.getFVarId} - -{docstring Lean.Elab.Tactic.getFVarIds} - -{docstring Lean.Elab.Tactic.sortMVarIdsByIndex} - -{docstring Lean.Elab.Tactic.sortMVarIdArrayByIndex} - -## Source Locations - -{docstring Lean.Elab.Tactic.withLocation} - -## Goals - -{docstring Lean.Elab.Tactic.getGoals} - -{docstring Lean.Elab.Tactic.setGoals} - -{docstring Lean.Elab.Tactic.getMainGoal} - -{docstring Lean.Elab.Tactic.getMainTag} - -{docstring Lean.Elab.Tactic.closeMainGoal} - -{docstring Lean.Elab.Tactic.focus} - -{docstring Lean.Elab.Tactic.tagUntaggedGoals} - -{docstring Lean.Elab.Tactic.getUnsolvedGoals} - -{docstring Lean.Elab.Tactic.pruneSolvedGoals} - -{docstring Lean.Elab.Tactic.appendGoals} - -{docstring Lean.Elab.Tactic.closeMainGoalUsing} - -## Term Elaboration - -{docstring Lean.Elab.Tactic.elabTerm} - -{docstring Lean.Elab.Tactic.elabTermEnsuringType} - -{docstring Lean.Elab.Tactic.elabTermWithHoles} - - -## Low-Level Operations - -These operations are primarily used as part of the implementation of {name}`TacticM` or of particular tactics. -It's rare that they are useful when implementing new tactics. - -### Monad Class Implementations - -These operations are exposed through standard Lean monad type classes. 
- -{docstring Lean.Elab.Tactic.tryCatch} - -{docstring Lean.Elab.Tactic.liftMetaMAtMain} - -{docstring Lean.Elab.Tactic.getMainModule} - -{docstring Lean.Elab.Tactic.orElse} - -### Macro Expansion - -{docstring Lean.Elab.Tactic.getCurrMacroScope} - -{docstring Lean.Elab.Tactic.adaptExpander} - -### Simplifier - -{docstring Lean.Elab.Tactic.elabSimpArgs} - -{docstring Lean.Elab.Tactic.elabSimpConfig} - -{docstring Lean.Elab.Tactic.elabSimpConfigCtxCore} - -{docstring Lean.Elab.Tactic.dsimpLocation'} - -{docstring Lean.Elab.Tactic.elabDSimpConfigCore} - -### Attributes - -{docstring Lean.Elab.Tactic.tacticElabAttribute} - -{docstring Lean.Elab.Tactic.mkTacticAttribute} diff --git a/Manual/Terms.lean b/Manual/Terms.lean index feb7aa46..03e2d343 100644 --- a/Manual/Terms.lean +++ b/Manual/Terms.lean @@ -202,7 +202,9 @@ open B #eval x ``` ```leanOutput ambi (whitespace := lax) -ambiguous, possible interpretations +Ambiguous term + x +Possible interpretations: B.x : String A.x : String @@ -753,7 +755,7 @@ However, {lean}`Username.validate` can't be called on {lean}`"root"` using field #eval "root".validate ``` ```leanOutput notString -invalid field 'validate', the environment does not contain 'String.validate' +Invalid field `validate`: The environment does not contain `String.validate` "root" has type String @@ -884,7 +886,7 @@ numerals are polymorphic in Lean, but the numeral `4` cannot be used in a contex Array ?m.4 due to the absence of the instance above -Additional diagnostic information may be available using the `set_option diagnostics true` command. +Hint: Additional diagnostic information may be available using the `set_option diagnostics true` command. 
``` Using pipeline field notation causes the array to be inserted at the first type-correct position: @@ -1285,7 +1287,7 @@ $x:ident@$h:ident:$e -- Literals /-- error: Invalid pattern: Expected a constructor or constant marked with `[match_pattern]` -/ -#check_msgs in +#guard_msgs in def foo (x : String) : String := match x with | "abc" => "" @@ -1321,9 +1323,9 @@ partial instance : OfNat Blah n where -- This shows that the partial instance was not unfolded /-- error: Dependent elimination failed: Type mismatch when solving this alternative: it has type - motive (instOfNatBlah_1.f 0) : Sort ?u.903 + motive (instOfNatBlah_1.f 0) : Sort ?u.1340 but is expected to have type - motive n✝ : Sort ?u.903 + motive n✝ : Sort ?u.1340 -/ #check_msgs in def defg (n : Blah) : Bool := @@ -1332,9 +1334,9 @@ def defg (n : Blah) : Bool := /-- error: Dependent elimination failed: Type mismatch when solving this alternative: it has type - motive (Float.ofScientific 25 true 1) : Sort ?u.946 + motive (Float.ofScientific 25 true 1) : Sort ?u.1439 but is expected to have type - motive x✝ : Sort ?u.946 + motive x✝ : Sort ?u.1439 -/ #check_msgs in def twoPointFive? : Float → Option Float @@ -1391,7 +1393,7 @@ is not definitionally equal to the right-hand side 3 = 5 ⊢ 3 = 3 ∨ 3 = 5 --- -info: { val := 3, val2 := ?m.1743, ok := ⋯ } : OnlyThreeOrFive +info: { val := 3, val2 := ?m.2638, ok := ⋯ } : OnlyThreeOrFive -/ #check_msgs in #check OnlyThreeOrFive.mk 3 .. 
@@ -1439,9 +1441,21 @@ This {tech}[indexed family] describes mostly-balanced trees, with the depth enco ```lean inductive BalancedTree (α : Type u) : Nat → Type u where | empty : BalancedTree α 0 - | branch (left : BalancedTree α n) (val : α) (right : BalancedTree α n) : BalancedTree α (n + 1) - | lbranch (left : BalancedTree α (n + 1)) (val : α) (right : BalancedTree α n) : BalancedTree α (n + 2) - | rbranch (left : BalancedTree α n) (val : α) (right : BalancedTree α (n + 1)) : BalancedTree α (n + 2) + | branch + (left : BalancedTree α n) + (val : α) + (right : BalancedTree α n) : + BalancedTree α (n + 1) + | lbranch + (left : BalancedTree α (n + 1)) + (val : α) + (right : BalancedTree α n) : + BalancedTree α (n + 2) + | rbranch + (left : BalancedTree α n) + (val : α) + (right : BalancedTree α (n + 1)) : + BalancedTree α (n + 2) ``` To begin the implementation of a function to construct a perfectly balanced tree with some initial element and a given depth, a {tech}[hole] can be used for the definition. @@ -1461,7 +1475,9 @@ depth : Nat Matching on the expected depth and inserting holes results in an error message for each hole. These messages demonstrate that the expected type has been refined, with `depth` replaced by the matched values. ```lean (error := true) (name := fill2) -def BalancedTree.filledWith (x : α) (depth : Nat) : BalancedTree α depth := +def BalancedTree.filledWith + (x : α) (depth : Nat) : + BalancedTree α depth := match depth with | 0 => _ | n + 1 => _ @@ -1487,7 +1503,7 @@ depth n : Nat Matching on the depth of a tree and the tree itself leads to a refinement of the tree's type according to the depth's pattern. This means that certain combinations are not well-typed, such as {lean}`0` and {name BalancedTree.branch}`branch`, because refining the second discriminant's type yields {lean}`BalancedTree α 0` which does not match the constructor's type. 
-````lean (name := patfail) (error := true) +```lean (name := patfail) (error := true) def BalancedTree.isPerfectlyBalanced (n : Nat) (t : BalancedTree α n) : Bool := match n, t with @@ -1496,7 +1512,7 @@ def BalancedTree.isPerfectlyBalanced isPerfectlyBalanced left && isPerfectlyBalanced right | _, _ => false -```` +``` ```leanOutput patfail type mismatch left.branch val right @@ -1706,9 +1722,9 @@ No {tech}[ι-reduction] is possible, because the value being matched is a variab In the case of {lean}`k + 1`, that is, {lean}`Nat.add k (.succ .zero)`, the second pattern matches, so it reduces to {lean}`Nat.succ (Nat.add k .zero)`. The second pattern now matches, yielding {lean}`Nat.succ k`, which is a valid pattern. ::: -````lean (show := false) +```lean (show := false) end -```` +``` :::: diff --git a/Manual/Types.lean b/Manual/Types.lean index 81e7dbba..79fa830b 100644 --- a/Manual/Types.lean +++ b/Manual/Types.lean @@ -20,6 +20,7 @@ set_option maxRecDepth 800 #doc (Manual) "The Type System" => %%% tag := "type-system" +shortContextTitle := "Type System" %%% {deftech}_Terms_, also known as {deftech}_expressions_, are the fundamental units of meaning in Lean's core language. @@ -104,12 +105,12 @@ def LengthList (α : Type u) : Nat → Type u ``` Because Lean's tuples nest to the right, multiple nested parentheses are not needed: -````lean +```lean example : LengthList Int 0 := () example : LengthList String 2 := ("Hello", "there", ()) -```` +``` If the length does not match the number of entries, then the computed type will not match the term: ```lean error:=true name:=wrongNum @@ -398,18 +399,18 @@ tag := "level-expressions" Levels that occur in a definition are not restricted to just variables and addition of constants. More complex relationships between universes can be defined using level expressions. -```` +``` Level ::= 0 | 1 | 2 | ... 
-- Concrete levels | u, v -- Variables | Level + n -- Addition of constants | max Level Level -- Least upper bound | imax Level Level -- Impredicative LUB -```` +``` Given an assignment of level variables to concrete numbers, evaluating these expressions follows the usual rules of arithmetic. The `imax` operation is defined as follows: -$$``\mathtt{imax}\ u\ v = \begin{cases}0 & \mathrm{when\ }v = 0\\\mathtt{max}\ u\ v&\mathrm{otherwise}\end{cases}`` +$$`\mathtt{imax}\ u\ v = \begin{cases}0 & \mathrm{when\ }v = 0\\\mathtt{max}\ u\ v&\mathrm{otherwise}\end{cases}` `imax` is used to implement {tech}[impredicative] quantification for {lean}`Prop`. In particular, if `A : Sort u` and `B : Sort v`, then `(x : A) → B : Sort (imax u v)`. diff --git a/elan-init b/elan-init new file mode 100755 index 00000000..3d14ee40 Binary files /dev/null and b/elan-init differ diff --git a/lake-manifest.json b/lake-manifest.json index 3d11c9be..610e7810 100644 --- a/lake-manifest.json +++ b/lake-manifest.json @@ -5,7 +5,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "76be179d1e805398bbd9b834945e5d07790cbf3e", + "rev": "ce4108e2edf3ff980484425aa41d946c5549d16d", "name": "verso", "manifestFile": "lake-manifest.json", "inputRev": "main", @@ -25,7 +25,7 @@ "type": "git", "subDir": null, "scope": "", - "rev": "ffe97dbb0002e15acd571649e93cecfb3d7c47d5", + "rev": "8d780d556de7ed7b1006805bcbc64959b8173e1d", "name": "subverso", "manifestFile": "lake-manifest.json", "inputRev": "main", diff --git a/lakefile.lean b/lakefile.lean index 9ffd1af5..11dfd4fa 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -3,6 +3,7 @@ Copyright (c) 2024 Lean FRO LLC. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. 
Author: David Thrane Christiansen -/ +import Lean.Elab.Import import Lake open Lake DSL open System (FilePath) @@ -19,12 +20,15 @@ package "verso-manual" where #["-Wl,-ignore_optimization_hints"] else #[] + leanOptions := #[⟨`weak.verso.code.warnLineLength, .ofNat 72⟩] lean_lib Manual where def figureDir : FilePath := "figures" def figureOutDir : FilePath := "static/figures" +def errorExplanationExOutDir : FilePath := + defaultBuildDir / "error_explanation_examples" def ensureDir (dir : System.FilePath) : IO Unit := do if !(← dir.pathExists) then @@ -41,6 +45,10 @@ target subversoExtractMod : FilePath := do exe.fetch +lean_exe extract_explanation_examples where + root := `ExtractExplanationExamples + supportInterpreter := true + target figures : Array FilePath := do let files := (← figureDir.readDir).filterMap fun f => do @@ -78,6 +86,188 @@ target figures : Array FilePath := do pure srcInfo +/- +This section contains infrastructure for preprocessing code blocks in error explanations. Error +explanation code blocks must be allowed to contain imports, so we must run the full frontend over +each one. To improve efficiency, we do this in a preprocessing step in which code blocks with the +same imports are grouped together, avoiding the need to repeatedly import the same modules anew. + +Preprocessing proceeds as follows: + +1. Error explanations are extracted from the elaboration environment of this Lakefile (which + matches the Lean version used to elaborate these examples in the manual itself) using the + `all_error_explanations%` macro; we then extract any Lean code blocks these contain. +2. We group the extracted code blocks by their headers (`mkPreprocessingGroups`). We skip any code + blocks for which there already exists a valid JSON file in the preprocessing output directory + (determined by `hasUsableCache` by source hash and the Lean version used to generate the JSON). +3. 
The code blocks in each group are written to Lean modules in a temporary directory and + preprocessed by the `extract_explanation_examples` tool (see `preprocessGroup`). Note that while + we call this tool once for each preprocessing group, each code block gets a separate JSON output + file (allowing us to cache on a per-code-block, rather than per-group, basis; this is especially + important because the majority of the code blocks have no imports and thus belong to the same + group). + +To depend on the preprocessed JSON, modules can import `PreprocessedExplanations`, which depends on +this preprocessing target and exposes a constant `preprocessedExplanationsRoot` that gives the file +path to the directory to which the JSON files are written. +-/ +section ExplanationPreprocessing + +open Lean Meta + +/- This must agree with `mkExampleName` in `Manual.ErrorExplanation`. -/ +private def mkExampleName (errorName : Name) (idx : Nat) : Name := + errorName ++ s!"block{idx}".toName + +/-- +Returns `true` if there exists a cached elaboration result for code block `id` +with hash `hash` on the current Lean version. +-/ +def hasUsableCache (id : String) (hash : UInt64) : IO Bool := do + let path := errorExplanationExOutDir / (id ++ ".json") + unless (← System.FilePath.pathExists path) do return false + let cacheStr ← IO.FS.readFile path + let .ok json := Json.parse cacheStr | return false + let .ok cacheHash := json.getObjVal? "hash" >>= FromJson.fromJson? (α := UInt64) + | return false + let .ok version := json.getObjVal? "version" >>= Json.getStr? + | return false + return version == Lean.versionString && cacheHash == hash + +/-- +Runs the explanation example extractor over every entry in `examples`, +generating output JSON files in `explanationExamplesDir`. All entries in +`examples` must have equivalent headers, as the same environment will be reused +for each. 
+-/ +def preprocessGroup (examples : Array (Name × String)) (exePath : FilePath) : IO Unit := + IO.FS.withTempDir fun tmpDir => do + let examplePaths ← examples.mapM fun (name, input) => do + let path := tmpDir / (name.toString ++ ".lean") + IO.FS.writeFile path input + pure path.toString + let childConfig := { + cmd := exePath.toString + args := #[errorExplanationExOutDir.toString] ++ examplePaths + stdin := .piped, stdout := .piped, stderr := .piped + } + let out ← IO.Process.output childConfig + if out.exitCode != 0 then + let args := childConfig.args.foldl (s!"{·} \"{·}\"") "" + let cmd := s!"extract_explanation_examples{args}" + throw <| IO.userError <| + s!"Nonzero exit code {out.exitCode} when running `{cmd}`\n" ++ + s!"Stderr:\n{out.stderr}\n\nStdout:\n{out.stdout}\n\n" + +deriving instance BEq, Hashable for Import + +/-- +Generates groups from `codeBlocks` of examples with equivalent headers, +discarding those that already have a valid cache. +-/ +def mkPreprocessingGroups (codeBlocks : Array (Name × String)) : + IO (Array (Array (Name × String))) := do + let (map : Std.HashMap (Array Import) _) ← + codeBlocks.foldlM (init := {}) fun acc (name, block) => do + if (← hasUsableCache name.toString (hash block)) then + pure acc + else + let inputCtx := Parser.mkInputContext block "Main.lean" + let (header, _, _) ← Parser.parseHeader inputCtx + let imports := Elab.headerToImports header + let acc := if acc.contains imports then acc else acc.insert imports #[] + pure <| acc.modify imports fun namedBlocks => namedBlocks.push (name, block) + return map.toArray.map Prod.snd + +/-- The state of a Markdown traversal: are we inside or outside a code block? -/ +inductive MDTraversalState where + | outsideCode + | insideCode (isLean : Bool) (numTicks : Nat) + +/-- +Extracts Lean code blocks from `input` and returns them with their indexed names. 
+-/ +def extractCodeBlocks (exampleName : Name) (input : String) : Array (Name × String) := Id.run do + let lines := input.splitOn "\n" + let mut codeBlocks : Array (Name × String) := #[] + + let mut state := MDTraversalState.outsideCode + let mut acc : Array String := #[] + let mut idx := 0 + for line in lines do + if line.startsWith "```" then + let numTicks := line.takeWhile (· == '`') |>.length + match state with + | .outsideCode => + let lang := line.drop numTicks |>.takeWhile (! ·.isWhitespace) + state := .insideCode (lang == "lean" || lang.isEmpty) numTicks + | .insideCode isLean expectedTicks => + if numTicks == expectedTicks then + state := .outsideCode + if isLean then + -- Match MD4Lean by including the trailing newline: + acc := acc.push "" + let code := "\n".intercalate acc.toList + codeBlocks := codeBlocks.push (mkExampleName exampleName idx, code) + acc := #[] + idx := idx + 1 + else + acc := acc.push line + else if state matches .insideCode true _ then + acc := acc.push line + return codeBlocks + +deriving instance ToExpr for MessageSeverity +deriving instance ToExpr for ErrorExplanation.Metadata +deriving instance ToExpr for DeclarationLocation +deriving instance ToExpr for ErrorExplanation + +/-- +Elaborates to an expression containing all error explanation entries. This +provides access to error explanations outside of the metaprogramming monads. +-/ +elab "all_error_explanations%" : term => + return toExpr <| getErrorExplanationsRaw (← getEnv) + +/-- Preprocess code examples in error explanations. 
-/ +target preprocess_explanations_async : Unit := do + let exeJob ← extract_explanation_examples.fetch + let explans := all_error_explanations% + let allBlocks := explans.flatMap fun (name, explan) => + extractCodeBlocks name explan.doc + let groups ← mkPreprocessingGroups allBlocks + + let writeModuleJob ← Job.async do + let moduleSrc := s!"def preprocessedExplanationsRoot : System.FilePath :=\n \ + \"{errorExplanationExOutDir}\"\n" + let some mod ← findModule? `PreprocessedExplanations | + error s!"Module `PreprocessedExplanations is missing from the Lake configuration" + buildFileUnlessUpToDate' mod.leanFile do + createParentDirs mod.leanFile + IO.FS.writeFile mod.leanFile moduleSrc + liftM (m := IO) <| try IO.FS.removeFile mod.oleanFile catch + | .noFileOrDirectory .. => pure () + | e => throw e + + let preprocessJob ← exeJob.bindM fun exe => do + let groupJobs ← groups.mapM (Job.async do preprocessGroup · exe) + return Job.mixArray groupJobs + + return preprocessJob.mix writeModuleJob + +/-- +A blocking version of `preprocess_explanations_async`. Ensures that all required +files have been generated when `PreprocessedExplanations` is imported. 
+-/ +target preprocess_explanations_sync : Unit := do + .pure <$> (← preprocess_explanations_async.fetch).await + +lean_lib PreprocessedExplanations where + needs := #[preprocess_explanations_sync] + srcDir := defaultBuildDir / "src" + +end ExplanationPreprocessing + @[default_target] lean_exe "generate-manual" where needs := #[`@/figures, `@/subversoExtractMod] diff --git a/lean-toolchain b/lean-toolchain index 1efd3655..fff0a20c 100644 --- a/lean-toolchain +++ b/lean-toolchain @@ -1 +1 @@ -leanprover/lean4:v4.21.0-rc3 +leanprover/lean4:v4.22.0-rc3 diff --git a/static/search/domain-mappers.js b/static/search/domain-mappers.js index 7162507b..16010b6a 100644 --- a/static/search/domain-mappers.js +++ b/static/search/domain-mappers.js @@ -225,6 +225,22 @@ const lakeTomlTableMapper = { displayName: "Lake TOML Table", }; +/** + * @type {DomainMapper} + */ +const errorExplanationMapper = { + dataToSearchables: (domainData) => + Object.entries(domainData.contents).map(([key, value]) => { + return { + searchKey: key, + address: `${value[0].address}#${value[0].id}`, + domainId: "Manual.errorExplanation", + ref: value, + }}), + className: "error-explanation-domain", + displayName: "Error Explanation", +}; + export const domainMappers = { "Verso.Genre.Manual.doc": docDomainMapper, "Verso.Genre.Manual.doc.option": docOptionDomainMapper, @@ -240,4 +256,5 @@ export const domainMappers = { "Manual.lakeTomlField": lakeTomlFieldMapper, "Manual.elanCommand": elanCommandMapper, "Manual.elanOpt": elanOptMapper, + "Manual.errorExplanation": errorExplanationMapper }; diff --git a/static/search/search-box.css b/static/search/search-box.css index 9d783b5b..1a66ab3f 100644 --- a/static/search/search-box.css +++ b/static/search/search-box.css @@ -44,7 +44,7 @@ outline: none; font-size: .9rem; padding: .3rem .5rem; - font-family: sans-serif; + font-family: system-ui, sans-serif; /* Fix firefox eating spaces in textContent */ white-space: -moz-pre-space; } @@ -120,10 +120,24 @@ 
#search-wrapper .search-result.elan-option-domain, #search-wrapper .search-result.env-var-domain, #search-wrapper .search-result.lake-command-domain, +#search-wrapper .search-result.error-explanation-domain, #search-wrapper .search-result.elan-command-domain { font-family: var(--verso-code-font-family); } +#search-wrapper .search-result.full-text { + font-family: var(--verso-text-font-family); +} +#search-wrapper .search-result.full-text .header { + display: block; +} +#search-wrapper .search-result.full-text .header, +#search-wrapper .search-result.full-text .header em { + font-style: normal; + font-family: var(--verso-structure-font-family); + font-weight: bold; +} + #search-wrapper .search-result.tactic-domain, #search-wrapper .search-result.conv-tactic-domain { font-family: var(--verso-code-font-family); @@ -170,6 +184,10 @@ text-decoration: underline; } +#search-wrapper .search-result .domain em { + font-style: italic; +} + #search-wrapper .search-result .domain { text-align: right; color: #777; @@ -179,6 +197,23 @@ font-size: .7rem; } +#search-wrapper .search-result .domain.text-context { + /* For full-text search results, truncate on the left with an ellipsis */ + text-overflow: ellipsis; + direction: rtl; + white-space: nowrap; + overflow: hidden; +} + +#search-wrapper .search-result .domain .context-elem { + display: inline-block; +} + +#search-wrapper .search-result .domain .context-elem:not(:last-child)::after { + content: "»"; + margin: 0 0.25em; +} + /* Couple the domain tighter with the search term on smaller screens, otherwise it's easy to get lost in the results. 
*/ @media screen and (max-width: 700px) { diff --git a/static/search/search-box.js b/static/search/search-box.js index 49ff68ae..59cdd9c4 100644 --- a/static/search/search-box.js +++ b/static/search/search-box.js @@ -18,6 +18,16 @@ const fuzzysort = /** @type {{fuzzysort: Fuzzysort.Fuzzysort}} */ ( /** @type {unknown} */ (window) ).fuzzysort; +const searchIndex = /** @type {{searchIndex: TextSearchIndex}} */ ( + /** @type {unknown} */ (window) +).searchIndex; + + +/** Whether to search word prefixes or whole words in full-text searches. Should match the setting in search-highlight.js. + * @type {boolean} + */ +const expandMatches = true; + /** * Type definitions to help if you have typescript enabled. * @@ -27,8 +37,177 @@ const fuzzysort = /** @type {{fuzzysort: Fuzzysort.Fuzzysort}} */ ( * @typedef {(searchable: Searchable, matchedParts: MatchedPart[], document: Document) => HTMLElement} CustomResultRender * @typedef {{dataToSearchables: DomainDataToSearchables, customRender?: CustomResultRender, displayName: string, className: string}} DomainMapper * @typedef {Record} DomainMappers - * @typedef {{item: Searchable, fuzzysortResult: Fuzzysort.Result, htmlItem: HTMLLIElement}} SearchResult + * @typedef {{ref: string, score: number, doc: {id: string, header: string, context: string, contents: string}}} TextMatch + * @typedef {{item: Searchable, fuzzysortResult: Fuzzysort.Result, htmlItem: HTMLLIElement}|{terms: string, textItem: TextMatch, htmlItem: HTMLLIElement}} SearchResult + * @typedef {{run: (tokens: string[]) => string[]}} ElasticLunrPipeline + * @typedef {{bool?: "AND"|"OR", fields?:Record, expand?: boolean}} SearchConfig + * @typedef {{search: ((term: string, config: SearchConfig) => TextMatch[]), pipeline: ElasticLunrPipeline}|undefined|null} TextSearchIndex + * @typedef {{original: string, stem: string, start: number, end: number}} TextToken + * @typedef {{start: number, end: number, index: number, matches: TextToken[]}} TextSnippet + */ + +/** + * @param 
{TextSnippet} s1 + * @param {TextSnippet} s2 + * @return {number} + */ +const compareSnippets = (s1, s2) => { + // First compare by number of unique terms + let terms1 = new Set(s1.matches.map((x) => x.stem)); + let terms2 = new Set(s2.matches.map((x) => x.stem)); + let terms = terms1.size - terms2.size; + if (terms !== 0) { + return terms; + } + + // Then by number of matches + let matches = s1.matches.length - s2.matches.length; + if (matches !== 0) { + return matches; + } + + // Finally by index + return s1.index - s2.index; +}; + +/** + * @param {string} text + * @return {TextToken[]} + */ +const tokenizeText = (text) => { + /** @type {TextToken[]} */ + const toks = []; + const regex = /\S+/g; + let match; + while ((match = regex.exec(text)) !== null) { + let stems = searchIndex.pipeline.run([match[0]]); + for (const stem of stems) { + toks.push({ + original: match[0], + start: match.index, + end: match.index + match[0].length, + stem: stem.toLowerCase() + }); + } + } + return toks; +} + +/** + * @type {RegExp} + */ +const wordChar = /\p{L}/u + +/** + * @param {string} text + * @param {number} i + * @return {number} + */ +const wordStartBefore = (text, i) => { + while (i > 0) { + if (!wordChar.test(text[i])) return i + 1; /* Adjust due to start indices being inclusive */ + i--; + } + return i; +} + +/** + * @param {string} text + * @param {number} i + * @return {number} */ +const wordEndAfter = (text, i) => { + while (i < text.length) { + if (!wordChar.test(text[i])) return i; /* This is used as the (exclusive) end index in a slice, so one greater is correct */ + i++; + } + return i; +} + +/** + * @param {string} text + * @param {string} query + * @param {{contextLength?: number, maxSnippets?: number}} options + * @return {Element|null} +*/ +const highlightTextResult = (text, query, options = {}) => { + const { + contextLength = 50, // characters of context around each match + maxSnippets = 3 // maximum number of snippets to return + } = options; + + const 
terms = searchIndex.pipeline.run(query.trim().toLowerCase().split(/\s+/).filter(term => term.length > 0)); + const toks = tokenizeText(text); + const matches = expandMatches ? toks.filter(t => terms.some(tm => t.stem.startsWith(tm))) : toks.filter(t => terms.includes(t.stem)); + + if (matches.length === 0) { + return null; // No matches found + } + + // Group nearby matches into snippets + /** @type {TextSnippet[]} */ + const snippets = []; + let currentSnippet = null; + for (const match of matches) { + if (!currentSnippet || match.start > currentSnippet.end + contextLength * 2) { + // Start new snippet + currentSnippet = { + start: wordStartBefore(text, Math.max(0, match.start - contextLength)), + end: wordEndAfter(text, Math.min(text.length, match.end + contextLength)), + index: snippets.length, + matches: [match] + }; + snippets.push(currentSnippet); + } else { + // Extend current snippet + currentSnippet.end = wordEndAfter(text, Math.min(text.length, match.end + contextLength)); + currentSnippet.matches.push(match); + } + } + + // Limit number of snippets. First, sort them by quality (which takes unique term occurrences and + // total term count into consideration), then take the N best, then put them back in document order. 
+ const limitedSnippets = snippets.sort(compareSnippets).slice(0, maxSnippets).sort((s1, s2) => s1.index - s2.index); + + // Generate highlighted text for each snippet + const highlightedSnippets = limitedSnippets.map((snippet) => { + let snippetText = text.substring(snippet.start, snippet.end); + + // Adjust match positions relative to snippet start + const relativeMatches = snippet.matches.map(match => ({ + term: match.original, + start: match.start - snippet.start, + end: match.end - snippet.start + })); + + // Sort matches by position (descending) to avoid position shifts during replacement + relativeMatches.sort((a, b) => b.start - a.start); + + // Apply highlighting + for (const match of relativeMatches) { + const before = snippetText.substring(0, match.start); + const highlighted = `${match.term}`; + const after = snippetText.substring(match.end); + snippetText = before + highlighted + after; + } + + // Add ellipses + const prefix = snippet.start > 0 ? ' …' : ''; + const suffix = snippet.end < text.length ? '… ' : ''; + + const elem = document.createElement("span"); + elem.appendChild(document.createTextNode(prefix)); + const m = document.createElement("span"); + m.innerHTML = snippetText; + elem.appendChild(m); + elem.appendChild(document.createTextNode(suffix)); + return elem; + }); + + const elem = document.createElement("span"); + elem.append(...highlightedSnippets); + return elem; +} /** * Maps data from Lean to an object with search terms as keys and a list of results as values. 
@@ -104,11 +283,80 @@ const searchableToHtml = ( return li; }; +/** + * Maps from a data item to a HTML LI element + * @param {string} term + * @param {TextMatch} match + * @param {Document} document + * @return {HTMLLIElement|null} + */ +const textResultToHtml = ( + term, + match, + document +) => { + const li = document.createElement("li"); + li.role = "option"; + li.className = `search-result full-text`; + li.title = "Full-text search result" + // DEBUG: + // li.title = `Full-text search result (${match.score}) (${match.ref})`; + + const searchTerm = document.createElement("p"); + let inHeader = true; + let headerHl = highlightTextResult(match.doc.header, term, {contextLength: 30}); // Only abbreviate huge headers + if (!headerHl) { + inHeader = false; + headerHl = document.createElement("span"); + headerHl.append(document.createTextNode(match.doc.header)); + } + headerHl.className = "header"; + searchTerm.append(headerHl); + let contentHl = highlightTextResult(match.doc.contents, term, {contextLength: 10}); + if (!contentHl) { + if (!inHeader) { + // Exclude this result. It'd be cleaner to do this elsewhere, but duplicating the string + // processing would be expensive. 
+ return null; + } + contentHl = document.createElement("span"); + contentHl.appendChild(document.createTextNode("...")); + for (const t of term.split(/\s+/)) { + const tm = document.createElement("em"); + tm.appendChild(document.createTextNode(t)); + contentHl.appendChild(tm); + contentHl.appendChild(document.createTextNode("...")); + } + } + searchTerm.append(contentHl); + li.appendChild(searchTerm); + + const domainName = document.createElement("p"); + li.appendChild(domainName); + domainName.className = "domain"; + if (match.doc.context.trim() == "") { + domainName.textContent = "Full-text search"; + } else { + // This is a slight abuse of "domain", but it seems to work well + let context = match.doc.context.replaceAll("\t", " » "); + domainName.append(document.createTextNode(context)); + domainName.classList.add('text-context'); + } + + return li; +}; + /** * @param {SearchResult} result * @returns string */ -const resultToText = (result) => result.fuzzysortResult.target; +const resultToText = (result) => { + if ("fuzzysortResult" in result) { + return result.fuzzysortResult.target; + } else { + return result.terms; + } +} /** * @template T @@ -237,6 +485,12 @@ class SearchBox { comboboxNode ); + // Initialize with a full-text result's query, if one is being presented + const query = new URLSearchParams(window.location.search).get('terms')?.trim(); + comboboxNode.textContent = query ? query : ""; + + + this.comboboxHasVisualFocus = false; this.listboxHasVisualFocus = false; @@ -291,7 +545,7 @@ class SearchBox { // TODO more work on the domain filters // this.domainFilters.push(docDomainFilter); - this.filterOptions(); + this.setValue(query ? query : ""); // Open Button @@ -317,14 +571,21 @@ class SearchBox { } /** - * @param {SearchResult} result + * @param {string} itemAddress + * @param {string|null} query */ - confirmResult(result) { + confirmResult(itemAddress, query=null) { + query = query ? 
"?terms=" + encodeURIComponent(query) : ""; + const [addr, id] = itemAddress.split('#', 2); + itemAddress = id? addr + query + '#' + id : addr + query; + const base = document.querySelector('base'); if (base) { - window.location.assign(base.href + result.item.address); + let baseNoSlash = base.href.endsWith("/") ? base.href.slice(0, -1) : base.href; + let itemAddressNoSlash = itemAddress.startsWith("/") ? itemAddress.slice(1) : itemAddress; + window.location.assign(baseNoSlash + '/' + itemAddressNoSlash); } else { - window.location.assign(result.item.address); + window.location.assign(itemAddress); } } @@ -335,7 +596,6 @@ class SearchBox { this.filter = value; this.comboboxNode.textContent = this.filter; this.imeRewriter.setSelections([new Range(this.filter.length, 0)]); - this.filterOptions(); } /** @@ -393,12 +653,24 @@ class SearchBox { return null; } - const results = fuzzysort.go(filter, this.preparedData, { + let results = fuzzysort.go(filter, this.preparedData, { limit: 30, threshold: 0.25, }); - if (results.length === 0) { + const textResults = searchIndex ? 
searchIndex.search(filter, {expand: expandMatches, bool: "AND", fields: {header: {boost: 1.25}, contents: {boost: 1}, context: {boost: 0.1} }}) : []; + + // Normalize the scores for text results by capping at a threshold, to better integrate with fuzzysearch results + const bestPossibleText = 0.8; + const maxTextScore = textResults.reduce((max, item) => Math.max(max, item.score), -Infinity); + if (maxTextScore > bestPossibleText) { + const factor = bestPossibleText / maxTextScore; + for (const res of textResults) { + res.score = res.score * factor; + } + } + + if (results.length === 0 && textResults.length === 0) { this.filteredOptions = []; this.firstOption = null; this.lastOption = null; @@ -412,61 +684,107 @@ class SearchBox { */ let newCurrentOption = null; - for (let i = 0; i < results.length; i++) { - const result = results[i]; - const dataItems = this.mappedData[result.target]; - for (let j = 0; j < dataItems.length; j++) { - const searchable = dataItems[j]; - const option = searchableToHtml( - this.domainMappers, - dataItems[j], - result - .highlight((v) => ({ v })) - .map((v) => - typeof v === "string" - ? 
{ t: "text", v } - : { t: "highlight", v: v.v } - ), - document - ); - /** @type {SearchResult} */ - const searchResult = { - item: searchable, - fuzzysortResult: result, - htmlItem: option, - }; - - option.addEventListener("click", this.onOptionClick(searchResult)); - option.addEventListener( - "pointerover", - this.onOptionPointerover.bind(this) - ); - option.addEventListener( - "pointerout", - this.onOptionPointerout.bind(this) - ); - this.filteredOptions.push(searchResult); - this.listboxNode.appendChild(option); - if (i === 0 && j === 0) { - this.firstOption = searchResult; - } - if (i === results.length - 1 && j === dataItems.length - 1) { - this.lastOption = searchResult; + /** @type {(Fuzzysort.Result|TextMatch) []} */ + let allResults = []; + allResults.push(...textResults); + allResults.push(...results); + allResults.sort((x, y) => y.score - x.score); + allResults = allResults.slice(0, 30); + + this.filteredOptions = []; + this.firstOption = null; + this.lastOption = null; + for (let i = 0; i < allResults.length; i++) { + const result = allResults[i]; + if ("target" in result) { + const dataItems = this.mappedData[result.target]; + for (let j = 0; j < dataItems.length; j++) { + const searchable = dataItems[j]; + const option = searchableToHtml( + this.domainMappers, + dataItems[j], + result + .highlight((v) => ({ v })) + .map((v) => + typeof v === "string" + ? 
{ t: "text", v } + : { t: "highlight", v: v.v } + ), + document + ); + option.title = option.title; // DEBUG: show scores + ` (${result.score})`; + /** @type {SearchResult} */ + const searchResult = { + item: searchable, + fuzzysortResult: result, + htmlItem: option, + }; + + option.addEventListener("click", this.onOptionClick(searchResult)); + option.addEventListener( + "pointerover", + this.onOptionPointerover.bind(this) + ); + option.addEventListener( + "pointerout", + this.onOptionPointerout.bind(this) + ); + this.filteredOptions.push(searchResult); + this.listboxNode.appendChild(option); + if (i === 0 && j === 0) { + this.firstOption = searchResult; + } + if (i === allResults.length - 1 && j === dataItems.length - 1) { + this.lastOption = searchResult; + } + if (currentOptionText === resultToText(searchResult)) { + newCurrentOption = searchResult; + } } - if (currentOptionText === resultToText(searchResult)) { - newCurrentOption = searchResult; + } else { + const option = textResultToHtml(filter, result, document); + if (option) { + /** @type {SearchResult} */ + const searchResult = { + terms: filter, + textItem: result, + htmlItem: option + }; + option.addEventListener("click", this.onOptionClick(searchResult)); + option.addEventListener( + "pointerover", + this.onOptionPointerover.bind(this) + ); + option.addEventListener( + "pointerout", + this.onOptionPointerout.bind(this) + ); + this.filteredOptions.push(searchResult); + this.listboxNode.appendChild(option); + if (i === 0) { + this.firstOption = searchResult; + } + if (i === allResults.length - 1) { + this.lastOption = searchResult; + } + if (currentOptionText === resultToText(searchResult)) { + newCurrentOption = searchResult; + } } } } const moreResults = document.createElement("li"); - moreResults.textContent = `Showing ${results.length}/${results.total} results`; + moreResults.textContent = `Showing ${allResults.length}/${results.total + textResults.length} results`; moreResults.className = 
`more-results`; this.listboxNode.appendChild(moreResults); if (newCurrentOption) { this.currentOption = newCurrentOption; } + if (!this.currentOption) { + this.currentOption = this.firstOption; + } return newCurrentOption ?? this.firstOption; } @@ -575,7 +893,11 @@ class SearchBox { if (this.listboxHasVisualFocus) { this.setValue(opt(this.currentOption, resultToText) ?? ""); if (this.currentOption) { - this.confirmResult(this.currentOption); + if("fuzzysortResult" in this.currentOption) { + this.confirmResult(this.currentOption.item.address); + } else { + this.confirmResult(this.currentOption.textItem.doc.id, this.currentOption.terms); + } } } this.close(true); @@ -794,7 +1116,11 @@ class SearchBox { */ return () => { this.comboboxNode.textContent = resultToText(result); - this.confirmResult(result); + if ("fuzzysortResult" in result) { + this.confirmResult(result.item.address); + } else { + this.confirmResult(result.textItem.doc.id, resultToText(result)); + } this.close(true); }; } diff --git a/static/search/search-highlight.css b/static/search/search-highlight.css new file mode 100644 index 00000000..32cb3f70 --- /dev/null +++ b/static/search/search-highlight.css @@ -0,0 +1,76 @@ +.text-search-results { + background-color: var(--verso-selected-color); +} + +.text-search-results.focused { + outline: auto; +} + +#highlight-controls { + position: fixed; + bottom: 10px; + right: 10px; + z-index: 99; /* The search box dropdown is 100, and should cover this */ + display: flex; + gap: 4px; + background: white; + border: 1px solid #ccc; + border-radius: 4px; + padding: 4px; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); + box-sizing: border-box; + font-size: .9rem; + font-family: system-ui, sans-serif; + min-width: 400px; + max-width: var(--verso-content-max-width, 47rem); + width: 50%; + display: flex; +} + +@media screen and (max-width: 700px) { + #highlight-controls { + width: 100%; + bottom: 0px; + right: 0px; + border-radius: 4px 4px 0 0; + } +} + + +#highlight-prev, 
#highlight-next, #highlight-close { + padding: 6px 8px; + border-radius: 2px; + cursor: pointer; + background: #f8f9fa; + border: 1px solid #ddd; +} + +@media screen and (max-width: 700px) { + /* Touch-friendly sizing */ + #highlight-prev, #highlight-next, #highlight-close { + min-width: var(--verso-burger-width, 1.5rem); + min-height: var(--verso-burger-height, 1.5rem); + } +} + +#highlight-close { + margin-left: 4px; +} + +#highlight-current-count { + padding: 6px 8px; + background: #f8f9fa; + border: 1px solid #ddd; + border-radius: 2px; + min-width: 40px; + text-align: center; + flex: 1 1 100%; +} + +#highlight-current-count:has(#highlight-current:not(:empty)) { + cursor: pointer; +} + +#highlight-current:not(:empty) { + margin-inline: 0 8px; +} diff --git a/static/search/search-highlight.js b/static/search/search-highlight.js new file mode 100644 index 00000000..fd70a5ec --- /dev/null +++ b/static/search/search-highlight.js @@ -0,0 +1,390 @@ + +/** + * @typedef {{ref: string, score: number, doc: {id: string, header: string, context: string, contents: string}}} TextMatch + * @typedef {{run: (tokens: string[]) => string[]}} ElasticLunrPipeline + * @typedef {{bool?: "AND"|"OR", fields?:Record}} SearchConfig + * @typedef {{search: ((term: string, config: SearchConfig) => TextMatch[]), pipeline: ElasticLunrPipeline}|undefined|null} TextSearchIndex + * @typedef {{original: string, stem: string, start: number, end: number}} TextToken + */ + +/** Whether to search word prefixes or whole words in full-text searches. Should match the setting in search-box.js. + * @type {boolean} + */ +const expandHlMatches = true; + + +const searchIndex = /** @type {{searchIndex: TextSearchIndex}} */ ( + /** @type {unknown} */ (window) +).searchIndex; + + +/** Tokenizes the given string, computing stems. 
+ * @param {string} text + * @return {TextToken[]} + */ +const tokenizeText = (text) => { + const toks = []; + const regex = /[^\s(),."“”—:]+/g; + let match; + while ((match = regex.exec(text)) !== null) { + let stems = searchIndex.pipeline.run([match[0]]); + for (const stem of stems) { + toks.push({ + original: match[0], + start: match.index, + end: match.index + match[0].length, + stem: stem.toLowerCase() + }); + } + } + return toks; +} + +function highlightSearchTerms() { + // Get search terms from URL query string + const urlParams = new URLSearchParams(window.location.search); + const searchQuery = urlParams.get('terms'); + + if (!searchQuery) { + return; // No search terms found + } + + // Stem the terms + const searchTerms = {}; + const regex = /\S+/g; + let match; + while ((match = regex.exec(searchQuery)) !== null) { + let stems = searchIndex.pipeline.run([match[0]]); + for (const stem of stems) { + searchTerms[stem.toLowerCase()] = match[0]; + } + } + + // Function to highlight text in a text node + function highlightTextNode(textNode) { + let text = textNode.textContent; + + const toks = tokenizeText(text); + for (const t of toks.reverse()) { + if (expandHlMatches) { + // We're doing full-text search with matching prefixes. Find the longest matching stem in the results and use it. + let bestStem = ""; + for (termStem in searchTerms) { + if (termStem.length <= bestStem.length) continue; + if (t.stem.startsWith(termStem)) bestStem = termStem; + } + if (bestStem.length > 0) { + text = text.slice(0, t.start) + `${text.slice(t.start, t.end)}` + text.slice(t.end); + } + } else { + // We're doing full-text search with whole words only. Look the stem up directly. 
+ if (searchTerms.hasOwnProperty(t.stem)) { + text = text.slice(0, t.start) + `${text.slice(t.start, t.end)}` + text.slice(t.end); + } + } + } + + // Create a temporary container + const tempDiv = document.createElement('div'); + tempDiv.innerHTML = text; + + // Replace the text node with highlighted content + const fragment = document.createDocumentFragment(); + while (tempDiv.firstChild) { + fragment.appendChild(tempDiv.firstChild); + + } + const parent = textNode.parentNode; + parent.replaceChild(fragment, textNode); + parent.querySelectorAll('.text-search-results').forEach((e) => { + e.addEventListener('click', () => { + const i = allHighlights.indexOf(e); + if (i >= 0) { + currentHighlightIndex = i; + updateNavigationState(); + } + }); + }); + } + + /** Function to traverse DOM and find text nodes + * @param {any} node + */ + function traverseNodes(node) { + if (node.nodeType === Node.TEXT_NODE) { + highlightTextNode(node); + } else if (node.nodeType === Node.ELEMENT_NODE) { + // Skip script, style, and already highlighted elements + if (node.tagName && + !['SCRIPT', 'STYLE', 'NOSCRIPT'].includes(node.tagName.toUpperCase()) && + !node.classList.contains('text-search-results') && + // Don't highlight search terms in invisible hovers + !node.classList.contains('hover-info') && + // Don't highlight search terms in doc box labels + !(node.classList.contains('label') && node.parentNode && node.parentNode.classList.contains('namedocs'))) { + + // Process child nodes (in reverse order to handle DOM changes) + const children = Array.from(node.childNodes); + for (let i = children.length - 1; i >= 0; i--) { + traverseNodes(children[i]); + } + } + } + } + + // Start traversal from
+ document.querySelectorAll('main section').forEach(traverseNodes); + + // Update highlights array after highlighting + updateHighlightsArray(); +} + +// Function to remove all highlights +function removeHighlights() { + const highlightedElements = document.querySelectorAll('span.text-search-results'); + highlightedElements.forEach(span => { + const parent = span.parentNode; + parent.replaceChild(document.createTextNode(span.textContent), span); + parent.normalize(); // Merge adjacent text nodes + }); + updateHighlightsArray(); +} + +/** The index of the current highlight + * @type {number} + */ +let currentHighlightIndex = -1; +/** All highlight elements. + * @type {HTMLElement[]} + */ +let allHighlights = []; + +/** Update highlights array and reset navigation + */ +function updateHighlightsArray() { + allHighlights = Array.from(document.querySelectorAll('span.text-search-results')); + currentHighlightIndex = -1; + updateNavigationState(); +} + +/** Navigate to next highlight + */ +function nextHighlight() { + if (allHighlights.length === 0) return; + + currentHighlightIndex = (currentHighlightIndex + 1) % allHighlights.length; + scrollToHighlight(currentHighlightIndex); +} + +/** Navigate to previous highlight + */ +function prevHighlight() { + if (allHighlights.length === 0) return; + + currentHighlightIndex = currentHighlightIndex <= 0 ? 
+ allHighlights.length - 1 : currentHighlightIndex - 1; + scrollToHighlight(currentHighlightIndex); +} + +/** Scroll to a specific highlight + * @param {number} index The index of the highlight element in allHighlights + */ +function scrollToHighlight(index) { + if (index >= 0 && index < allHighlights.length) { + // Ensure visibility by opening collapsed examples + let here = allHighlights[index]; + if (here) { + while (here = here.parentElement) { + if (here.nodeName.toLowerCase() == "details") { + here.setAttribute('open', 'open'); + break; + } + } + } + + // Scroll to it + allHighlights[index].scrollIntoView({ + behavior: 'smooth', + block: 'center' + }); + + updateNavigationState(); + } +} + +/** Update navigation button states based on the contents of the document. + */ +function updateNavigationState() { + const prevBtn = document.getElementById('highlight-prev'); + const nextBtn = document.getElementById('highlight-next'); + const countSpan = document.getElementById('highlight-count'); + const currentSpan = document.getElementById('highlight-current'); + const currentCount = document.getElementById('highlight-current-count'); + + if (prevBtn && nextBtn && countSpan) { + const hasHighlights = allHighlights.length > 0; + prevBtn.disabled = !hasHighlights; + nextBtn.disabled = !hasHighlights; + + if (hasHighlights && currentHighlightIndex >= 0) { + countSpan.textContent = `${currentHighlightIndex + 1}/${allHighlights.length}`; + currentSpan.textContent = allHighlights[currentHighlightIndex].textContent; + let resName = allHighlights[currentHighlightIndex].title; + resName = resName.charAt(0).toLowerCase() + resName.slice(1); + currentCount.title = 'Go to ' + resName; + document.querySelectorAll('.text-search-results').forEach((e) => e.classList.remove('focused')); + let here = allHighlights[currentHighlightIndex]; + here.classList.add('focused'); + while (here = here.parentElement) { + if (here.nodeName.toLowerCase() == "details") { + here.setAttribute('open', 
'open'); + break; + } + } + } else { + countSpan.textContent = hasHighlights ? `0/${allHighlights.length}` : '0/0'; + currentSpan.textContent = ''; + currentCount.title = ''; + } + } +} + +/** Toggle highlights + */ +function toggleHighlights() { + const existingHighlights = document.querySelectorAll('span.text-search-results'); + if (existingHighlights.length > 0) { + removeHighlights(); + } else { + highlightSearchTerms(); + } +} + +/** Scroll to first highlight after a specific element + * @param {string} elementId + */ +function scrollToFirstHighlightAfter(elementId) { + let targetElement = document.getElementById(elementId); + if (!targetElement) { + targetElement = document.body; + } + + const highlights = document.querySelectorAll('span.text-search-results'); + if (highlights.length === 0) { + return false; + } + + // Find the first highlight that comes after the target element in document order + const targetPosition = targetElement.compareDocumentPosition ? + targetElement : null; + + if (!targetPosition) { + return false; + } + + for (let highlight of highlights) { + const position = targetElement.compareDocumentPosition(highlight); + // Check if highlight comes after target element + if (position & Node.DOCUMENT_POSITION_FOLLOWING) { + highlight.scrollIntoView({ + behavior: 'smooth', + block: 'center' + }); + currentHighlightIndex = allHighlights.indexOf(highlight); + updateNavigationState(); + return true; + } + } + + return false; +} + +/** Checks whether there's a search query in the URL */ +function hasSearchQuery() { + const urlParams = new URLSearchParams(window.location.search); + const searchQuery = urlParams.get('terms'); + return searchQuery && searchQuery.trim().length > 0; +} + +/** Creates control buttons (only if search query exists) + */ +function createControlButtons() { + if (!hasSearchQuery()) { + return; + } + + const container = document.createElement('div'); + container.id = 'highlight-controls'; + + // Previous button + const 
prevBtn = document.createElement('button'); + prevBtn.id = 'highlight-prev'; + prevBtn.textContent = '◀'; + prevBtn.title = 'Previous match'; + prevBtn.addEventListener('click', prevHighlight); + + const currentSpan = document.createElement('span'); + currentSpan.id = 'highlight-current'; + + // Count display + const countSpan = document.createElement('span'); + countSpan.id = 'highlight-count'; + countSpan.textContent = '0/0'; + + const currentCount = document.createElement('span'); + currentCount.id = 'highlight-current-count'; + currentCount.appendChild(currentSpan); + currentCount.appendChild(countSpan); + currentCount.addEventListener('click', () => scrollToHighlight(currentHighlightIndex)); + + // Next button + const nextBtn = document.createElement('button'); + nextBtn.id = 'highlight-next'; + nextBtn.textContent = '▶'; + nextBtn.title = 'Next match'; + nextBtn.addEventListener('click', nextHighlight); + + // Toggle button + const toggleBtn = document.createElement('button'); + toggleBtn.id = 'highlight-close'; + toggleBtn.textContent = '✖'; + toggleBtn.title = 'Close search'; + toggleBtn.addEventListener('click', toggleHighlights); + toggleBtn.addEventListener('click', () => container.remove()); + + container.appendChild(prevBtn); + container.appendChild(currentCount); + container.appendChild(nextBtn); + container.appendChild(toggleBtn); + + document.body.appendChild(container); +} + +// Run the highlighter when DOM is ready +if (document.readyState === 'loading') { + document.addEventListener('DOMContentLoaded', function() { + highlightSearchTerms(); + createControlButtons(); + updateHighlightsArray(); + + // Check for hash in URL and scroll to first highlight after it + if (window.location.hash) { + const elementId = window.location.hash.substring(1); + setTimeout(() => { + scrollToFirstHighlightAfter(elementId); + }, 100); // Small delay to ensure highlighting is complete + } + }); +} else { + highlightSearchTerms(); + createControlButtons(); + 
updateHighlightsArray(); + + // Check for hash in URL and scroll to first highlight after it + if (window.location.hash) { + const elementId = window.location.hash.substring(1); + setTimeout(() => { + scrollToFirstHighlightAfter(elementId); + }, 100); // Small delay to ensure highlighting is complete + } +} diff --git a/static/search/search-init.js b/static/search/search-init.js index a1294695..d38ee26c 100644 --- a/static/search/search-init.js +++ b/static/search/search-init.js @@ -19,7 +19,7 @@ const searchHTML = `
class="cb_edit" contenteditable="true" role="searchbox" - placeholder="Jump to..." + placeholder="${window.searchIndex ? 'Search...' : 'Jump to...'}" aria-autocomplete="list" aria-expanded="false" aria-controls="cb1-listbox"