Diffstat (limited to 'nixpkgs/doc')
-rw-r--r--  nixpkgs/doc/README.md | 348
-rw-r--r--  nixpkgs/doc/anchor-use.js | 3
-rw-r--r--  nixpkgs/doc/anchor.min.js | 9
-rw-r--r--  nixpkgs/doc/build-helpers.md | 28
-rw-r--r--  nixpkgs/doc/build-helpers/fetchers.chapter.md | 291
-rw-r--r--  nixpkgs/doc/build-helpers/images.md | 12
-rw-r--r--  nixpkgs/doc/build-helpers/images/appimagetools.section.md | 167
-rw-r--r--  nixpkgs/doc/build-helpers/images/binarycache.section.md | 58
-rw-r--r--  nixpkgs/doc/build-helpers/images/dockertools.section.md | 1587
-rw-r--r--  nixpkgs/doc/build-helpers/images/makediskimage.section.md | 108
-rw-r--r--  nixpkgs/doc/build-helpers/images/ocitools.section.md | 104
-rw-r--r--  nixpkgs/doc/build-helpers/images/portableservice.section.md | 174
-rw-r--r--  nixpkgs/doc/build-helpers/special.md | 12
-rw-r--r--  nixpkgs/doc/build-helpers/special/checkpoint-build.section.md | 43
-rw-r--r--  nixpkgs/doc/build-helpers/special/fakenss.section.md | 77
-rw-r--r--  nixpkgs/doc/build-helpers/special/fhs-environments.section.md | 56
-rw-r--r--  nixpkgs/doc/build-helpers/special/makesetuphook.section.md | 37
-rw-r--r--  nixpkgs/doc/build-helpers/special/mkshell.section.md | 41
-rw-r--r--  nixpkgs/doc/build-helpers/special/vm-tools.section.md | 148
-rw-r--r--  nixpkgs/doc/build-helpers/testers.chapter.md | 280
-rw-r--r--  nixpkgs/doc/build-helpers/trivial-build-helpers.chapter.md | 710
-rw-r--r--  nixpkgs/doc/common.nix | 4
-rw-r--r--  nixpkgs/doc/contributing.md | 10
-rw-r--r--  nixpkgs/doc/contributing/coding-conventions.chapter.md | 63
-rw-r--r--  nixpkgs/doc/contributing/contributing-to-documentation.chapter.md | 11
-rw-r--r--  nixpkgs/doc/contributing/quick-start.chapter.md | 3
-rw-r--r--  nixpkgs/doc/contributing/reviewing-contributions.chapter.md | 35
-rw-r--r--  nixpkgs/doc/contributing/submitting-changes.chapter.md | 88
-rw-r--r--  nixpkgs/doc/contributing/vulnerability-roundup.chapter.md | 11
-rw-r--r--  nixpkgs/doc/default.nix | 176
-rw-r--r--  nixpkgs/doc/development.md | 10
-rw-r--r--  nixpkgs/doc/development/opening-issues.chapter.md | 7
-rw-r--r--  nixpkgs/doc/doc-support/lib-function-docs.nix | 41
-rw-r--r--  nixpkgs/doc/doc-support/lib-function-locations.nix | 75
-rw-r--r--  nixpkgs/doc/functions.md | 11
-rw-r--r--  nixpkgs/doc/functions/debug.section.md | 5
-rw-r--r--  nixpkgs/doc/functions/generators.section.md | 57
-rw-r--r--  nixpkgs/doc/functions/library.md.in | 5
-rw-r--r--  nixpkgs/doc/functions/library/.gitkeep | 0
-rw-r--r--  nixpkgs/doc/functions/nix-gitignore.section.md | 56
-rw-r--r--  nixpkgs/doc/functions/prefer-remote-fetch.section.md | 17
-rw-r--r--  nixpkgs/doc/hooks/autoconf.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/automake.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/autopatchelf.section.md | 11
-rw-r--r--  nixpkgs/doc/hooks/bmake.section.md | 7
-rw-r--r--  nixpkgs/doc/hooks/breakpoint.section.md | 17
-rw-r--r--  nixpkgs/doc/hooks/cmake.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/gdk-pixbuf.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/ghc.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/gnome.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/index.md | 35
-rw-r--r--  nixpkgs/doc/hooks/installShellFiles.section.md | 27
-rw-r--r--  nixpkgs/doc/hooks/libiconv.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/libxml2.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/meson.section.md | 83
-rw-r--r--  nixpkgs/doc/hooks/mpi-check-hook.section.md | 25
-rw-r--r--  nixpkgs/doc/hooks/ninja.section.md | 5
-rw-r--r--  nixpkgs/doc/hooks/patch-rc-path-hooks.section.md | 50
-rw-r--r--  nixpkgs/doc/hooks/perl.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/pkg-config.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/postgresql-test-hook.section.md | 66
-rw-r--r--  nixpkgs/doc/hooks/python.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/scons.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/tetex-tex-live.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/unzip.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/validatePkgConfig.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/waf.section.md | 58
-rw-r--r--  nixpkgs/doc/hooks/xcbuild.section.md | 3
-rw-r--r--  nixpkgs/doc/hooks/zig.section.md | 63
-rw-r--r--  nixpkgs/doc/languages-frameworks/agda.section.md | 257
-rw-r--r--  nixpkgs/doc/languages-frameworks/android.section.md | 351
-rw-r--r--  nixpkgs/doc/languages-frameworks/beam.section.md | 396
-rw-r--r--  nixpkgs/doc/languages-frameworks/bower.section.md | 125
-rw-r--r--  nixpkgs/doc/languages-frameworks/chicken.section.md | 80
-rw-r--r--  nixpkgs/doc/languages-frameworks/coq.section.md | 151
-rw-r--r--  nixpkgs/doc/languages-frameworks/crystal.section.md | 77
-rw-r--r--  nixpkgs/doc/languages-frameworks/cuda.section.md | 151
-rw-r--r--  nixpkgs/doc/languages-frameworks/cuelang.section.md | 93
-rw-r--r--  nixpkgs/doc/languages-frameworks/dart.section.md | 136
-rw-r--r--  nixpkgs/doc/languages-frameworks/dhall.section.md | 472
-rw-r--r--  nixpkgs/doc/languages-frameworks/dotnet.section.md | 260
-rw-r--r--  nixpkgs/doc/languages-frameworks/emscripten.section.md | 167
-rw-r--r--  nixpkgs/doc/languages-frameworks/gnome.section.md | 220
-rw-r--r--  nixpkgs/doc/languages-frameworks/go.section.md | 288
-rw-r--r--  nixpkgs/doc/languages-frameworks/haskell.section.md | 1311
-rw-r--r--  nixpkgs/doc/languages-frameworks/hy.section.md | 31
-rw-r--r--  nixpkgs/doc/languages-frameworks/idris.section.md | 143
-rw-r--r--  nixpkgs/doc/languages-frameworks/idris2.section.md | 47
-rw-r--r--  nixpkgs/doc/languages-frameworks/index.md | 47
-rw-r--r--  nixpkgs/doc/languages-frameworks/ios.section.md | 225
-rw-r--r--  nixpkgs/doc/languages-frameworks/java.section.md | 131
-rw-r--r--  nixpkgs/doc/languages-frameworks/javascript.section.md | 446
-rw-r--r--  nixpkgs/doc/languages-frameworks/julia.section.md | 69
-rw-r--r--  nixpkgs/doc/languages-frameworks/lisp.section.md | 301
-rw-r--r--  nixpkgs/doc/languages-frameworks/lua.section.md | 258
-rw-r--r--  nixpkgs/doc/languages-frameworks/maven.section.md | 439
-rw-r--r--  nixpkgs/doc/languages-frameworks/nim.section.md | 125
-rw-r--r--  nixpkgs/doc/languages-frameworks/ocaml.section.md | 134
-rw-r--r--  nixpkgs/doc/languages-frameworks/octave.section.md | 92
-rw-r--r--  nixpkgs/doc/languages-frameworks/perl.section.md | 171
-rw-r--r--  nixpkgs/doc/languages-frameworks/php.section.md | 293
-rw-r--r--  nixpkgs/doc/languages-frameworks/pkg-config.section.md | 51
-rw-r--r--  nixpkgs/doc/languages-frameworks/python.section.md | 2151
-rw-r--r--  nixpkgs/doc/languages-frameworks/qt.section.md | 81
-rw-r--r--  nixpkgs/doc/languages-frameworks/r.section.md | 127
-rw-r--r--  nixpkgs/doc/languages-frameworks/ruby.section.md | 296
-rw-r--r--  nixpkgs/doc/languages-frameworks/rust.section.md | 1082
-rw-r--r--  nixpkgs/doc/languages-frameworks/swift.section.md | 184
-rw-r--r--  nixpkgs/doc/languages-frameworks/texlive.section.md | 230
-rw-r--r--  nixpkgs/doc/languages-frameworks/titanium.section.md | 110
-rw-r--r--  nixpkgs/doc/languages-frameworks/vim.section.md | 275
-rw-r--r--  nixpkgs/doc/lib.md | 6
-rw-r--r--  nixpkgs/doc/manpage-urls.json | 324
-rw-r--r--  nixpkgs/doc/manual.md.in | 15
-rw-r--r--  nixpkgs/doc/module-system/module-system.chapter.md | 105
-rw-r--r--  nixpkgs/doc/old/cross.txt | 329
-rw-r--r--  nixpkgs/doc/packages/cataclysm-dda.section.md | 129
-rw-r--r--  nixpkgs/doc/packages/citrix.section.md | 32
-rw-r--r--  nixpkgs/doc/packages/darwin-builder.section.md | 190
-rw-r--r--  nixpkgs/doc/packages/dlib.section.md | 13
-rw-r--r--  nixpkgs/doc/packages/eclipse.section.md | 68
-rw-r--r--  nixpkgs/doc/packages/elm.section.md | 11
-rw-r--r--  nixpkgs/doc/packages/emacs.section.md | 118
-rw-r--r--  nixpkgs/doc/packages/etc-files.section.md | 18
-rw-r--r--  nixpkgs/doc/packages/firefox.section.md | 55
-rw-r--r--  nixpkgs/doc/packages/fish.section.md | 50
-rw-r--r--  nixpkgs/doc/packages/fuse.section.md | 45
-rw-r--r--  nixpkgs/doc/packages/ibus.section.md | 40
-rw-r--r--  nixpkgs/doc/packages/index.md | 28
-rw-r--r--  nixpkgs/doc/packages/kakoune.section.md | 9
-rw-r--r--  nixpkgs/doc/packages/linux.section.md | 113
-rw-r--r--  nixpkgs/doc/packages/locales.section.md | 5
-rw-r--r--  nixpkgs/doc/packages/nginx.section.md | 11
-rw-r--r--  nixpkgs/doc/packages/opengl.section.md | 15
-rw-r--r--  nixpkgs/doc/packages/shell-helpers.section.md | 12
-rw-r--r--  nixpkgs/doc/packages/steam.section.md | 63
-rw-r--r--  nixpkgs/doc/packages/urxvt.section.md | 73
-rw-r--r--  nixpkgs/doc/packages/weechat.section.md | 85
-rw-r--r--  nixpkgs/doc/packages/xorg.section.md | 34
-rw-r--r--  nixpkgs/doc/preface.chapter.md | 50
-rw-r--r--  nixpkgs/doc/shell.nix | 20
-rw-r--r--  nixpkgs/doc/stdenv.md | 9
-rw-r--r--  nixpkgs/doc/stdenv/cross-compilation.chapter.md | 264
-rw-r--r--  nixpkgs/doc/stdenv/meta.chapter.md | 286
-rw-r--r--  nixpkgs/doc/stdenv/multiple-output.chapter.md | 96
-rw-r--r--  nixpkgs/doc/stdenv/platform-notes.chapter.md | 67
-rw-r--r--  nixpkgs/doc/stdenv/stdenv.chapter.md | 1664
-rw-r--r--  nixpkgs/doc/style.css | 416
-rwxr-xr-x  nixpkgs/doc/tests/manpage-urls.py | 109
-rw-r--r--  nixpkgs/doc/using-nixpkgs.md | 8
-rw-r--r--  nixpkgs/doc/using/configuration.chapter.md | 372
-rw-r--r--  nixpkgs/doc/using/overlays.chapter.md | 167
-rw-r--r--  nixpkgs/doc/using/overrides.chapter.md | 134
-rw-r--r--  nixpkgs/doc/using/platform-support.chapter.md | 18
154 files changed, 23223 insertions, 0 deletions
diff --git a/nixpkgs/doc/README.md b/nixpkgs/doc/README.md
new file mode 100644
index 000000000000..1e9305d040ba
--- /dev/null
+++ b/nixpkgs/doc/README.md
@@ -0,0 +1,348 @@
+# Contributing to the Nixpkgs reference manual
+
+This directory houses the source files for the Nixpkgs reference manual.
+
+Going forward, it should only contain [reference](https://nix.dev/contributing/documentation/diataxis#reference) documentation.
+For tutorials, guides and explanations, contribute to <https://nix.dev/> instead.
+
+For documentation only relevant for contributors, use Markdown files and code comments in the source code.
+
+Rendered documentation:
+- [Unstable (from master)](https://nixos.org/manual/nixpkgs/unstable/)
+- [Stable (from latest release)](https://nixos.org/manual/nixpkgs/stable/)
+
+The rendering tool is [nixos-render-docs](../pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs), sometimes abbreviated `nrd`.
+
+## Contributing to this documentation
+
+You can quickly check your edits with `nix-build`:
+
+```ShellSession
+$ cd /path/to/nixpkgs
+$ nix-build doc
+```
+
+If the build succeeds, the manual will be in `./result/share/doc/nixpkgs/manual.html`.
+
+### devmode
+
+The shell in the manual source directory makes a command, `devmode`, available.
+It is a daemon that:
+1. watches the manual's sources for changes and rebuilds the manual when they occur
+2. serves the manual over HTTP, injecting a script that triggers a reload on changes
+3. opens the manual in the default browser
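+
+For example, a typical session might look like the following (a sketch; it assumes you enter the shell defined in this directory, which puts `devmode` on your `PATH`):
+
+```ShellSession
+$ cd /path/to/nixpkgs/doc
+$ nix-shell
+[nix-shell]$ devmode
+```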
+
+## Syntax
+
+As per [RFC 0072](https://github.com/NixOS/rfcs/pull/72), all new documentation content should be written in [CommonMark](https://commonmark.org/) Markdown dialect.
+
+Additional syntax extensions are available, all of which can be used in NixOS option documentation. The following extensions are currently used:
+
+#### Tables
+
+Tables, using the [GitHub-flavored Markdown syntax](https://github.github.com/gfm/#tables-extension-).
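+
+For example, a small GFM table looks like this:
+
+```markdown
+| Syntax extension | Used for            |
+|------------------|---------------------|
+| Tables           | structured data     |
+| Anchors          | linking to sections |
+```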
+
+#### Anchors
+
+Explicitly defined **anchors** on headings, to allow linking to sections. These should always be used, to ensure that links to sections keep working even when the heading text changes, and to prevent conflicts between [automatically assigned identifiers](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/auto_identifiers.md).
+
+It uses the widely compatible [header attributes](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/attributes.md) syntax:
+
+```markdown
+## Syntax {#sec-contributing-markup}
+```
+
+> [!Note]
+> NixOS option documentation does not support headings in general.
+
+#### Inline Anchors
+
+Inline anchors allow linking to an arbitrary place in the text (e.g. individual list items, sentences, …).
+
+They are defined using a hybrid of the link syntax with the attributes syntax known from headings, called [bracketed spans](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/bracketed_spans.md):
+
+```markdown
+- []{#ssec-gnome-hooks-glib} `glib` setup hook will populate `GSETTINGS_SCHEMAS_PATH` and then `wrapGAppsHook` will prepend it to `XDG_DATA_DIRS`.
+```
+
+#### Automatic links
+
+If you **omit a link text** for a link pointing to a section, the text will be substituted automatically. For example `[](#chap-contributing)`.
+
+This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/using/syntax.html#targets-and-cross-referencing).
+
+
+#### HTML
+
+Inlining HTML is not allowed. Parts of the documentation get rendered to various non-HTML formats, such as man pages in the case of the NixOS manual.
+
+#### Roles
+
+If you want to link to a man page, you can use `` {manpage}`nix.conf(5)` ``. The references will turn into links when a mapping exists in [`doc/manpage-urls.json`](./manpage-urls.json).
+
+A few markups for other kinds of literals are also available:
+
+- `` {command}`rm -rfi` ``
+- `` {env}`XDG_DATA_DIRS` ``
+- `` {file}`/etc/passwd` ``
+- `` {option}`networking.useDHCP` ``
+- `` {var}`/etc/passwd` ``
+
+These literal kinds are used mostly in NixOS option documentation.
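+
+For instance, a sentence in the manual source using some of these roles could look like this:
+
+```markdown
+Run {command}`nix-build doc` and open the result in {file}`result/share/doc/nixpkgs/manual.html`; see also {manpage}`nix.conf(5)`.
+```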
+
+This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point). The feature itself originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage), where it has slightly different syntax.
+
+#### Admonitions
+
+Set off from the text to bring attention to something.
+
+It uses pandoc’s [fenced `div`s syntax](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/fenced_divs.md):
+
+```markdown
+::: {.warning}
+This is a warning
+:::
+```
+
+The following are supported:
+
+- [`caution`](https://tdg.docbook.org/tdg/5.0/caution.html)
+- [`important`](https://tdg.docbook.org/tdg/5.0/important.html)
+- [`note`](https://tdg.docbook.org/tdg/5.0/note.html)
+- [`tip`](https://tdg.docbook.org/tdg/5.0/tip.html)
+- [`warning`](https://tdg.docbook.org/tdg/5.0/warning.html)
+- [`example`](https://tdg.docbook.org/tdg/5.0/example.html)
+
+Example admonitions require a title to work.
+If you don't provide one, the manual will fail to build.
+
+```markdown
+::: {.example #ex-showing-an-example}
+
+# Title for this example
+
+Text for the example.
+:::
+```
+
+#### [Definition lists](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/definition_lists.md)
+
+For defining a group of terms:
+
+```markdown
+pear
+:   green or yellow bulbous fruit
+
+watermelon
+:   green fruit with red flesh
+```
+
+## Commit conventions
+
+- Make sure you read about the [commit conventions](../CONTRIBUTING.md#commit-conventions) common to Nixpkgs as a whole.
+
+- If creating a commit purely for documentation changes, format the commit message in the following way:
+
+  ```
+  doc: (documentation summary)
+
+  (Motivation for change, relevant links, additional information.)
+  ```
+
+  Examples:
+
+  * doc: update the kernel config documentation to use `nix-shell`
+  * doc: add information about `nix-update-script`
+
+    Closes #216321.
+
+- If the commit contains more than just documentation changes, follow the commit message format relevant for the rest of the changes.
+
+## Documentation conventions
+
+In an effort to keep the Nixpkgs manual in a consistent style, please follow the conventions below, unless they prevent you from properly documenting something.
+In that case, please open an issue about the particular documentation convention and tag it with a "needs: documentation" label.
+When needed, each convention explains why it exists, so you can decide whether or not to follow it based on your particular case.
+Note that these conventions are about the **structure** of the manual (and its source files), not about the content that goes in it.
+You, as the writer of documentation, are still in charge of its content.
+
+- Put each sentence in its own line.
+  This makes reviews and suggestions much easier, since GitHub's review system is based on lines.
+It also helps with identifying long sentences at a glance.
+
+- Use the [admonition syntax](#admonitions) for callouts and examples.
+
+- Provide at least one example per function, and make examples self-contained.
+  This is easier to understand for beginners.
+  It also helps with testing that it actually works – especially once we introduce automation.
+
+  Example code should be such that it can be passed to `pkgs.callPackage`.
+  Instead of something like:
+
+  ```nix
+  pkgs.dockerTools.buildLayeredImage {
+    name = "hello";
+    contents = [ pkgs.hello ];
+  }
+  ```
+
+  Write something like:
+
+  ```nix
+  { dockerTools, hello }:
+  dockerTools.buildLayeredImage {
+    name = "hello";
+    contents = [ hello ];
+  }
+  ```
+
+- When showing inputs/outputs of any [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop), such as a shell or the Nix REPL, use a format as you'd see in the REPL, while trying to visually separate inputs from outputs.
+  This means that for a shell, you should use a format like the following:
+  ```shell
+  $ nix-build -A hello '<nixpkgs>' \
+    --option require-sigs false \
+    --option trusted-substituters file:///tmp/hello-cache \
+    --option substituters file:///tmp/hello-cache
+  /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
+  ```
+  Note how the input is preceded by `$` on the first line and indented on subsequent lines, and how the output is provided as you'd see on the shell.
+
+  For the Nix REPL, you should use a format like the following:
+  ```shell
+  nix-repl> builtins.attrNames { a = 1; b = 2; }
+  [ "a" "b" ]
+  ```
+  Note how the input is preceded by `nix-repl>` and the output is provided as you'd see on the Nix REPL.
+
+- When documenting functions or anything that has inputs/outputs and example usage, use nested headings to clearly separate inputs, outputs, and examples.
+  Keep examples as the last nested heading, and link to the examples wherever applicable in the documentation.
+
+  The purpose of this convention is to provide a familiar structure for navigating the manual, so any reader can expect to find content related to inputs in an "inputs" heading, examples in an "examples" heading, and so on.
+  An example:
+  ```
+  ## buildImage
+
+  Some explanation about the function here.
+  Describe a particular scenario, and point to [](#ex-dockerTools-buildImage), which is an example demonstrating it.
+
+  ### Inputs
+
+  Documentation for the inputs of `buildImage`.
+  Perhaps even point to [](#ex-dockerTools-buildImage) again when talking about something specifically linked to it.
+
+  ### Passthru outputs
+
+  Documentation for any passthru outputs of `buildImage`.
+
+  ### Examples
+
+  Note that this is the last nested heading in the `buildImage` section.
+
+  :::{.example #ex-dockerTools-buildImage}
+
+  # Using `buildImage`
+
+  Example of how to use `buildImage` goes here.
+
+  :::
+  ```
+
+- Use [definition lists](#definition-lists) to document function arguments, and the attributes of such arguments as well as their [types](https://nixos.org/manual/nix/stable/language/values).
+  For example:
+
+  ```markdown
+  # pkgs.coolFunction
+
+  Description of what `coolFunction` does.
+
+  ## Inputs
+
+  `coolFunction` expects a single argument which should be an attribute set, with the following possible attributes:
+
+  `name` (String)
+
+  : The name of the resulting image.
+
+  `tag` (String; _optional_)
+
+  : Tag of the generated image.
+
+    _Default:_ the output path's hash.
+  ```
+
+#### Examples
+
+To define a referenceable example, use the following fencing:
+
+```markdown
+:::{.example #an-attribute-set-example}
+# An attribute set example
+
+You can add text before
+
+    ```nix
+    { a = 1; b = 2;}
+    ```
+
+and after code fencing
+:::
+```
+
+Defining examples through the `example` fencing class adds them to a "List of Examples" section after the Table of Contents.
+However, this list is not shown in the rendered documentation on nixos.org.
+
+#### Figures
+
+To define a referenceable figure, use the following fencing:
+
+```markdown
+::: {.figure #nixos-logo}
+# NixOS Logo
+![NixOS logo](./nixos_logo.png)
+:::
+```
+
+Defining figures through the `figure` fencing class adds them to a "List of Figures" section after the Table of Contents.
+However, this list is not shown in the rendered documentation on nixos.org.
+
+#### Footnotes
+
+To add a footnote explanation, use the following syntax:
+
+```markdown
+Sometimes it's better to add context [^context] in a footnote.
+
+[^context]: This explanation will be rendered at the end of the chapter.
+```
+
+#### Inline comments
+
+Inline comments are supported, using the following syntax:
+
+```markdown
+<!-- This is an inline comment -->
+```
+
+The comments will not appear in the rendered HTML.
+
+#### Link reference definitions
+
+Links can reference a label, for example, to make the link target reusable:
+
+```markdown
+::: {.note}
+Reference links can also be used to [shorten URLs][url-id] and keep the markdown readable.
+:::
+
+[url-id]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/doc/README.md
+```
+
+This syntax is taken from [CommonMark](https://spec.commonmark.org/0.30/#link-reference-definitions).
+
+#### Typographic replacements
+
+Typographic replacements are enabled. Check the [list of possible replacement patterns](https://github.com/executablebooks/markdown-it-py/blob/3613e8016ecafe21709471ee0032a90a4157c2d1/markdown_it/rules_core/replacements.py#L1-L15).
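+
+For example, in the following source, `(c)` and `...` are replaced by their typographic equivalents in the rendered output:
+
+```markdown
+Copyright (c) the Nixpkgs contributors. To be continued...
+```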
+
+## Getting help
+
+If you need documentation-specific help or reviews, ping [@NixOS/documentation-reviewers](https://github.com/orgs/nixos/teams/documentation-reviewers) on your pull request.
diff --git a/nixpkgs/doc/anchor-use.js b/nixpkgs/doc/anchor-use.js
new file mode 100644
index 000000000000..a45c4e2be68d
--- /dev/null
+++ b/nixpkgs/doc/anchor-use.js
@@ -0,0 +1,3 @@
+document.addEventListener('DOMContentLoaded', function(event) {
+  anchors.add('h1[id]:not(div.note h1, div.warning h1, div.tip h1, div.caution h1, div.important h1), h2[id]:not(div.note h2, div.warning h2, div.tip h2, div.caution h2, div.important h2), h3[id]:not(div.note h3, div.warning h3, div.tip h3, div.caution h3, div.important h3), h4[id]:not(div.note h4, div.warning h4, div.tip h4, div.caution h4, div.important h4), h5[id]:not(div.note h5, div.warning h5, div.tip h5, div.caution h5, div.important h5), h6[id]:not(div.note h6, div.warning h6, div.tip h6, div.caution h6, div.important h6)');
+});
diff --git a/nixpkgs/doc/anchor.min.js b/nixpkgs/doc/anchor.min.js
new file mode 100644
index 000000000000..00f80c058f6d
--- /dev/null
+++ b/nixpkgs/doc/anchor.min.js
@@ -0,0 +1,9 @@
+// @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt Expat
+//
+// AnchorJS - v5.0.0 - 2023-01-18
+// https://www.bryanbraun.com/anchorjs/
+// Copyright (c) 2023 Bryan Braun; Licensed MIT
+//
+// @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt Expat
+!function(A,e){"use strict";"function"==typeof define&&define.amd?define([],e):"object"==typeof module&&module.exports?module.exports=e():(A.AnchorJS=e(),A.anchors=new A.AnchorJS)}(globalThis,function(){"use strict";return function(A){function u(A){A.icon=Object.prototype.hasOwnProperty.call(A,"icon")?A.icon:"",A.visible=Object.prototype.hasOwnProperty.call(A,"visible")?A.visible:"hover",A.placement=Object.prototype.hasOwnProperty.call(A,"placement")?A.placement:"right",A.ariaLabel=Object.prototype.hasOwnProperty.call(A,"ariaLabel")?A.ariaLabel:"Anchor",A.class=Object.prototype.hasOwnProperty.call(A,"class")?A.class:"",A.base=Object.prototype.hasOwnProperty.call(A,"base")?A.base:"",A.truncate=Object.prototype.hasOwnProperty.call(A,"truncate")?Math.floor(A.truncate):64,A.titleText=Object.prototype.hasOwnProperty.call(A,"titleText")?A.titleText:""}function d(A){var e;if("string"==typeof A||A instanceof String)e=[].slice.call(document.querySelectorAll(A));else{if(!(Array.isArray(A)||A instanceof NodeList))throw new TypeError("The selector provided to AnchorJS was invalid.");e=[].slice.call(A)}return e}this.options=A||{},this.elements=[],u(this.options),this.add=function(A){var e,t,o,i,n,s,a,r,l,c,h,p=[];if(u(this.options),0!==(e=d(A=A||"h2, h3, h4, h5, h6")).length){for(null===document.head.querySelector("style.anchorjs")&&((A=document.createElement("style")).className="anchorjs",A.appendChild(document.createTextNode("")),void 0===(h=document.head.querySelector('[rel="stylesheet"],style'))?document.head.appendChild(A):document.head.insertBefore(A,h),A.sheet.insertRule(".anchorjs-link{opacity:0;text-decoration:none;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}",A.sheet.cssRules.length),A.sheet.insertRule(":hover>.anchorjs-link,.anchorjs-link:focus{opacity:1}",A.sheet.cssRules.length),A.sheet.insertRule("[data-anchorjs-icon]::after{content:attr(data-anchorjs-icon)}",A.sheet.cssRules.length),A.sheet.insertRule('@font-face{font-family:anchorjs-icons;src:url(data:n/a;base64,AAEAAAALAIAAAwAwT1MvMg8yG2cAAAE4AAAAYGNtYXDp3gC3AAABpAAAAExnYXNwAAAAEAAAA9wAAAAIZ2x5ZlQCcfwAAAH4AAABCGhlYWQHFvHyAAAAvAAAADZoaGVhBnACFwAAAPQAAAAkaG10eASAADEAAAGYAAAADGxvY2EACACEAAAB8AAAAAhtYXhwAAYAVwAAARgAAAAgbmFtZQGOH9cAAAMAAAAAunBvc3QAAwAAAAADvAAAACAAAQAAAAEAAHzE2p9fDzz1AAkEAAAAAADRecUWAAAAANQA6R8AAAAAAoACwAAAAAgAAgAAAAAAAAABAAADwP/AAAACgAAA/9MCrQABAAAAAAAAAAAAAAAAAAAAAwABAAAAAwBVAAIAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAMCQAGQAAUAAAKZAswAAACPApkCzAAAAesAMwEJAAAAAAAAAAAAAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAQAAg//0DwP/AAEADwABAAAAAAQAAAAAAAAAAAAAAIAAAAAAAAAIAAAACgAAxAAAAAwAAAAMAAAAcAAEAAwAAABwAAwABAAAAHAAEADAAAAAIAAgAAgAAACDpy//9//8AAAAg6cv//f///+EWNwADAAEAAAAAAAAAAAAAAAAACACEAAEAAAAAAAAAAAAAAAAxAAACAAQARAKAAsAAKwBUAAABIiYnJjQ3NzY2MzIWFxYUBwcGIicmNDc3NjQnJiYjIgYHBwYUFxYUBwYGIwciJicmNDc3NjIXFhQHBwYUFxYWMzI2Nzc2NCcmNDc2MhcWFAcHBgYjARQGDAUtLXoWOR8fORYtLTgKGwoKCjgaGg0gEhIgDXoaGgkJBQwHdR85Fi0tOAobCgoKOBoaDSASEiANehoaCQkKGwotLXoWOR8BMwUFLYEuehYXFxYugC44CQkKGwo4GkoaDQ0NDXoaShoKGwoFBe8XFi6ALjgJCQobCjgaShoNDQ0NehpKGgobCgoKLYEuehYXAAAADACWAAEAAAAAAAEACAAAAAEAAAAAAAIAAwAIAAEAAAAAAAMACAAAAAEAAAAAAAQACAAAAAEAAAAAAAUAAQALAAEAAAAAAAYACAAAAAMAAQQJAAEAEAAMAAMAAQQJAAIABgAcAAMAAQQJAAMAEAAMAAMAAQQJAAQAEAAMAAMAAQQJAAUAAgAiAAMAAQQJAAYAEAAMYW5jaG9yanM0MDBAAGEAbgBjAGgAbwByAGoAcwA0ADAAMABAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAH//wAP) format("truetype")}',A.sheet.cssRules.length)),h=document.querySelectorAll("[id]"),t=[].map.call(h,function(A){return 
A.id}),i=0;i<e.length;i++)if(this.hasAnchorJSLink(e[i]))p.push(i);else{if(e[i].hasAttribute("id"))o=e[i].getAttribute("id");else if(e[i].hasAttribute("data-anchor-id"))o=e[i].getAttribute("data-anchor-id");else{for(r=a=this.urlify(e[i].textContent),s=0;n=t.indexOf(r=void 0!==n?a+"-"+s:r),s+=1,-1!==n;);n=void 0,t.push(r),e[i].setAttribute("id",r),o=r}(l=document.createElement("a")).className="anchorjs-link "+this.options.class,l.setAttribute("aria-label",this.options.ariaLabel),l.setAttribute("data-anchorjs-icon",this.options.icon),this.options.titleText&&(l.title=this.options.titleText),c=document.querySelector("base")?window.location.pathname+window.location.search:"",c=this.options.base||c,l.href=c+"#"+o,"always"===this.options.visible&&(l.style.opacity="1"),""===this.options.icon&&(l.style.font="1em/1 anchorjs-icons","left"===this.options.placement)&&(l.style.lineHeight="inherit"),"left"===this.options.placement?(l.style.position="absolute",l.style.marginLeft="-1.25em",l.style.paddingRight=".25em",l.style.paddingLeft=".25em",e[i].insertBefore(l,e[i].firstChild)):(l.style.marginLeft=".1875em",l.style.paddingRight=".1875em",l.style.paddingLeft=".1875em",e[i].appendChild(l))}for(i=0;i<p.length;i++)e.splice(p[i]-i,1);this.elements=this.elements.concat(e)}return this},this.remove=function(A){for(var e,t,o=d(A),i=0;i<o.length;i++)(t=o[i].querySelector(".anchorjs-link"))&&(-1!==(e=this.elements.indexOf(o[i]))&&this.elements.splice(e,1),o[i].removeChild(t));return this},this.removeAll=function(){this.remove(this.elements)},this.urlify=function(A){var e=document.createElement("textarea");return e.innerHTML=A,A=e.value,this.options.truncate||u(this.options),A.trim().replace(/'/gi,"").replace(/[& +$,:;=?@"#{}|^~[`%!'<>\]./()*\\\n\t\b\v\u00A0]/g,"-").replace(/-{2,}/g,"-").substring(0,this.options.truncate).replace(/^-+|-+$/gm,"").toLowerCase()},this.hasAnchorJSLink=function(A){var e=A.firstChild&&-1<(" "+A.firstChild.className+" ").indexOf(" anchorjs-link "),A=A.lastChild&&-1<(" "+A.lastChild.className+" ").indexOf(" anchorjs-link ");return e||A||!1}}});
+// @license-end
diff --git a/nixpkgs/doc/build-helpers.md b/nixpkgs/doc/build-helpers.md
new file mode 100644
index 000000000000..06737e166760
--- /dev/null
+++ b/nixpkgs/doc/build-helpers.md
@@ -0,0 +1,28 @@
+# Build helpers {#part-builders}
+
+A build helper is a function that produces derivations.
+
+:::{.warning}
+This is not to be confused with the [`builder` argument of the Nix `derivation` primitive](https://nixos.org/manual/nix/unstable/language/derivations.html), which refers to the executable that produces the build result, or a [remote builder](https://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html), which refers to a remote machine that could run such an executable.
+:::
+
+Such a function is usually designed to abstract over a typical workflow for a given programming language or framework.
+This allows declaring a build recipe by setting a limited number of options relevant to the particular use case instead of using the `derivation` function directly.
+
+[`stdenv.mkDerivation`](#part-stdenv) is the most widely used build helper, and serves as a basis for many others.
+In addition, it offers various options to customize parts of the builds.
+
+There is no uniform interface for build helpers.
+[Trivial build helpers](#chap-trivial-builders) and [fetchers](#chap-pkgs-fetchers) have various input types for convenience.
+[Language- or framework-specific build helpers](#chap-language-support) usually follow the style of `stdenv.mkDerivation`, which accepts an attribute set or a fixed-point function taking an attribute set.
+
+```{=include=} chapters
+build-helpers/fetchers.chapter.md
+build-helpers/trivial-build-helpers.chapter.md
+build-helpers/testers.chapter.md
+build-helpers/special.md
+build-helpers/images.md
+hooks/index.md
+languages-frameworks/index.md
+packages/index.md
+```
diff --git a/nixpkgs/doc/build-helpers/fetchers.chapter.md b/nixpkgs/doc/build-helpers/fetchers.chapter.md
new file mode 100644
index 000000000000..5c7c3257e6d4
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/fetchers.chapter.md
@@ -0,0 +1,291 @@
+# Fetchers {#chap-pkgs-fetchers}
+
+Building software with Nix often requires downloading source code and other files from the internet.
+To this end, Nixpkgs provides *fetchers*: functions to obtain remote sources via various protocols and services.
+
+Nixpkgs fetchers differ from built-in fetchers such as [`builtins.fetchTarball`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-fetchTarball):
+- A built-in fetcher will download and cache files at evaluation time and produce a [store path](https://nixos.org/manual/nix/stable/glossary#gloss-store-path).
+  A Nixpkgs fetcher will create a ([fixed-output](https://nixos.org/manual/nix/stable/glossary#gloss-fixed-output-derivation)) [derivation](https://nixos.org/manual/nix/stable/language/derivations), and files are downloaded at build time.
+- Built-in fetchers will invalidate their cache after [`tarball-ttl`](https://nixos.org/manual/nix/stable/command-ref/conf-file#conf-tarball-ttl) expires, and will require network activity to check if the cache entry is up to date.
+  Nixpkgs fetchers only re-download if the specified hash changes or the store object is not otherwise available.
+- Built-in fetchers do not use [substituters](https://nixos.org/manual/nix/stable/command-ref/conf-file#conf-substituters).
+  Derivations produced by Nixpkgs fetchers will use any configured binary cache transparently.
+
+This significantly reduces the time needed to evaluate the entirety of Nixpkgs, and allows [Hydra](https://nixos.org/hydra) to retain and re-distribute sources used by Nixpkgs in the [public binary cache](https://cache.nixos.org).
+For these reasons, built-in fetchers are not allowed in Nixpkgs source code.
+
+The following table shows an overview of the differences:
+
+| Fetchers | Download | Output | Cache | Re-download when |
+|-|-|-|-|-|
+| `builtins.fetch*` | evaluation time | store path | `/nix/store`, `~/.cache/nix` | `tarball-ttl` expires, cache miss in `~/.cache/nix`, output store object not in local store |
+| `pkgs.fetch*` | build time | derivation | `/nix/store`, substituters | output store object not available |
+
+## Caveats {#chap-pkgs-fetchers-caveats}
+
+The fact that the hash belongs to the Nix derivation output and not the file itself can lead to confusion.
+For example, consider the following fetcher:
+
+```nix
+fetchurl {
+  url = "http://www.example.org/hello-1.0.tar.gz";
+  hash = "sha256-lTeyxzJNQeMdu1IVdovNMtgn77jRIhSybLdMbTkf2Ww=";
+}
+```
+
+A common mistake is to update a fetcher’s URL, or a version parameter, without updating the hash.
+
+```nix
+fetchurl {
+  url = "http://www.example.org/hello-1.1.tar.gz";
+  hash = "sha256-lTeyxzJNQeMdu1IVdovNMtgn77jRIhSybLdMbTkf2Ww=";
+}
+```
+
+**This will reuse the old contents**.
+Remember to invalidate the hash argument, in this case by setting the `hash` attribute to an empty string.
+
+```nix
+fetchurl {
+  url = "http://www.example.org/hello-1.1.tar.gz";
+  hash = "";
+}
+```
+
+Use the resulting error message to determine the correct hash.
+
+```
+error: hash mismatch in fixed-output derivation '/path/to/my.drv':
+         specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+            got:    sha256-lTeyxzJNQeMdu1IVdovNMtgn77jRIhSybLdMbTkf2Ww=
+```
+
+A similar problem arises while testing changes to a fetcher's implementation. If the output of the derivation already exists in the Nix store, test failures can go undetected. The [`invalidateFetcherByDrvHash`](#tester-invalidateFetcherByDrvHash) function helps prevent reusing cached derivations.
+
+## `fetchurl` and `fetchzip` {#fetchurl}
+
+Two basic fetchers are `fetchurl` and `fetchzip`. Both of these have two required arguments: a URL and a hash. The hash argument is typically `hash`, although arguments for specific hash algorithms are also supported; Nixpkgs contributors are currently recommended to use `hash`. This hash will be used by Nix to identify your source. A typical usage of `fetchurl` is provided below.
+
+```nix
+{ stdenv, fetchurl }:
+
+stdenv.mkDerivation {
+  name = "hello";
+  src = fetchurl {
+    url = "http://www.example.org/hello.tar.gz";
+    hash = "sha256-BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB=";
+  };
+}
+```
+
+The main difference between `fetchurl` and `fetchzip` is in how they store the contents. `fetchurl` will store the unaltered contents of the URL within the Nix store. `fetchzip`, on the other hand, will decompress the archive for you, making files and directories directly accessible in the future. `fetchzip` can only be used with archives. Despite the name, `fetchzip` is not limited to `.zip` files and can also be used with any tarball.
+
+Additional parameters to `fetchurl`:
+- `downloadToTemp`: Defaults to `false`. If `true`, saves the source to `$downloadedFile`, to be used in conjunction with `postFetch`.
+- `postFetch`: Shell code executed after the file has been fetched successfully. Use it for postprocessing, to check or transform the file.
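+
+As a sketch (the URL and hash here are placeholders), using `fetchzip` looks much like `fetchurl`, except that the resulting `src` is the unpacked directory:
+
+```nix
+{ stdenv, fetchzip }:
+
+stdenv.mkDerivation {
+  name = "hello";
+  # fetchzip unpacks the archive, so the hash covers the extracted directory
+  src = fetchzip {
+    url = "http://www.example.org/hello.tar.gz";
+    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+  };
+}
+```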
+
+## `fetchpatch` {#fetchpatch}
+
+`fetchpatch` works very similarly to `fetchurl` with the same arguments expected. It expects patch files as a source and performs normalization on them before computing the checksum. For example, it will remove comments or other unstable parts that are sometimes added by version control systems and can change over time.
+
+- `relative`: Similar to using `git-diff`'s `--relative` flag, only keep changes inside the specified directory, making paths relative to it.
+- `stripLen`: Remove the first `stripLen` components of pathnames in the patch.
+- `decode`: Pipe the downloaded data through this command before processing it as a patch.
+- `extraPrefix`: Prefix pathnames by this string.
+- `excludes`: Exclude files matching these patterns (applies after the above arguments).
+- `includes`: Include only files matching these patterns (applies after the above arguments).
+- `revert`: Revert the patch.
+
+Note that because the checksum is computed after applying these effects, using or modifying these arguments will have no effect unless the `hash` argument is changed as well.
+
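+A sketch of typical usage, where the upstream commit URL and the hash are placeholders:
+
+```nix
+fetchpatch {
+  # patch exported from a hypothetical upstream commit
+  url = "https://github.com/example-owner/example-repo/commit/0123456789abcdef0123456789abcdef01234567.patch";
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```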
+
+Most other fetchers return a directory rather than a single file.
+
+
+## `fetchDebianPatch` {#fetchdebianpatch}
+
+A wrapper around `fetchpatch`, which takes:
+- `patch` and `hash`: the patch's filename,
+  and its hash after normalization by `fetchpatch`;
+- `pname`: the Debian source package's name;
+- `version`: the upstream version number;
+- `debianRevision`: the [Debian revision number] if applicable;
+
+Here is an example of `fetchDebianPatch` in action:
+
+```nix
+{ lib
+, fetchDebianPatch
+, buildPythonPackage
+}:
+
+buildPythonPackage rec {
+  pname = "pysimplesoap";
+  version = "1.16.2";
+  src = <...>;
+
+  patches = [
+    (fetchDebianPatch {
+      inherit pname version;
+      debianRevision = "5";
+      name = "Add-quotes-to-SOAPAction-header-in-SoapClient.patch";
+      hash = "sha256-xA8Wnrpr31H8wy3zHSNfezFNjUJt1HbSXn3qUMzeKc0=";
+    })
+  ];
+
+  # ...
+}
+```
+
+Patches are fetched from `sources.debian.org`, and so must come from a
+package version that was uploaded to the Debian archive.  Packages may
+be removed from there once that specific version isn't in any suite
+anymore (stable, testing, unstable, etc.), so maintainers should use
+`copy-tarballs.pl` to archive the patch if it needs to be available
+longer-term.
+
+[Debian revision number]: https://www.debian.org/doc/debian-policy/ch-controlfields.html#version
+
+
+## `fetchsvn` {#fetchsvn}
+
+Used with Subversion. Expects `url` pointing to a Subversion directory, `rev`, and `hash`.
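+
+A minimal sketch, with placeholder repository URL, revision, and hash:
+
+```nix
+fetchsvn {
+  url = "svn://svn.example.org/project/trunk";
+  rev = 12345;
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```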
+
+## `fetchgit` {#fetchgit}
+
+Used with Git. Expects `url` pointing to a Git repository, `rev`, and `hash`. `rev` in this case can be the full Git commit id (SHA-1 hash) or a tag name like `refs/tags/v1.0`.
+
+Additionally, the following optional arguments can be given: `fetchSubmodules = true` makes `fetchgit` also fetch the submodules of a repository. If `deepClone` is set to `true`, the entire repository is cloned as opposed to just creating a shallow clone. `deepClone = true` also implies `leaveDotGit = true`, which means that the `.git` directory of the clone won't be removed after checkout.
+
+If only parts of the repository are needed, `sparseCheckout` can be used. This will prevent Git from fetching unnecessary blobs from the server; see [git sparse-checkout](https://git-scm.com/docs/git-sparse-checkout) for more information:
+
+```nix
+{ stdenv, fetchgit }:
+
+stdenv.mkDerivation {
+  name = "hello";
+  src = fetchgit {
+    url = "https://...";
+    sparseCheckout = [
+      "directory/to/be/included"
+      "another/directory"
+    ];
+    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+  };
+}
+```
+
+## `fetchfossil` {#fetchfossil}
+
+Used with Fossil. Expects `url` to a Fossil archive, `rev`, and `hash`.
+
+## `fetchcvs` {#fetchcvs}
+
+Used with CVS. Expects `cvsRoot`, `tag`, and `hash`.
+
+## `fetchhg` {#fetchhg}
+
+Used with Mercurial. Expects `url`, `rev`, and `hash`.
+
+A number of fetcher functions wrap part of `fetchurl` and `fetchzip`. They are mainly convenience functions intended for commonly used destinations of source code in Nixpkgs. These wrapper fetchers are listed below.
+
+## `fetchFromGitea` {#fetchfromgitea}
+
+`fetchFromGitea` expects five arguments. `domain` is the Gitea server name. `owner` is a string corresponding to the Gitea user or organization that controls this repository. `repo` corresponds to the name of the software repository. These are located at the top of every Gitea HTML page as `owner`/`repo`. `rev` corresponds to the Git commit hash or tag (e.g. `v1.0`) that will be downloaded from Git. Finally, `hash` corresponds to the hash of the extracted directory. Again, other hash algorithms are also available, but `hash` is currently preferred.
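+
+As a sketch, fetching a tag from Codeberg (a public Gitea instance; owner, repo, and hash are placeholders) could look like this:
+
+```nix
+fetchFromGitea {
+  domain = "codeberg.org";
+  owner = "example-owner";
+  repo = "example-repo";
+  rev = "v1.0";
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```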
+
+## `fetchFromGitHub` {#fetchfromgithub}
+
+`fetchFromGitHub` expects four arguments. `owner` is a string corresponding to the GitHub user or organization that controls this repository. `repo` corresponds to the name of the software repository. These are located at the top of every GitHub HTML page as `owner`/`repo`. `rev` corresponds to the Git commit hash or tag (e.g. `v1.0`) that will be downloaded from Git. Finally, `hash` corresponds to the hash of the extracted directory. Again, other hash algorithms are also available, but `hash` is currently preferred.
+
+To use a different GitHub instance, use `githubBase` (defaults to `"github.com"`).
+
+`fetchFromGitHub` uses `fetchzip` to download the source archive generated by GitHub for the specified revision. If `leaveDotGit`, `deepClone` or `fetchSubmodules` are set to `true`, `fetchFromGitHub` will use `fetchgit` instead. Refer to its section for documentation of these options.
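+
+A sketch with placeholder values; note how setting `fetchSubmodules` switches the implementation to `fetchgit`:
+
+```nix
+fetchFromGitHub {
+  owner = "example-owner";
+  repo = "example-repo";
+  rev = "v1.0";
+  # fetching submodules makes fetchFromGitHub use fetchgit under the hood
+  fetchSubmodules = true;
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```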
+
+## `fetchFromGitLab` {#fetchfromgitlab}
+
+This is used with GitLab repositories. It behaves similarly to `fetchFromGitHub`, and expects `owner`, `repo`, `rev`, and `hash`.
+
+To use a specific GitLab instance, use `domain` (defaults to `"gitlab.com"`).
+
+
+## `fetchFromGitiles` {#fetchfromgitiles}
+
+This is used with Gitiles repositories. The arguments expected are similar to `fetchgit`.
+
+## `fetchFromBitbucket` {#fetchfrombitbucket}
+
+This is used with BitBucket repositories. The arguments expected are very similar to `fetchFromGitHub` above.
+
+## `fetchFromSavannah` {#fetchfromsavannah}
+
+This is used with Savannah repositories. The arguments expected are very similar to `fetchFromGitHub` above.
+
+## `fetchFromRepoOrCz` {#fetchfromrepoorcz}
+
+This is used with repo.or.cz repositories. The arguments expected are very similar to `fetchFromGitHub` above.
+
+## `fetchFromSourcehut` {#fetchfromsourcehut}
+
+This is used with sourcehut repositories. Similar to `fetchFromGitHub` above,
+it expects `owner`, `repo`, `rev` and `hash`, but don't forget the tilde (`~`)
+in front of the username! Expected arguments also include `vc` (`"git"` by default, or `"hg"`),
+`domain` and `fetchSubmodules`.
+
+If `fetchSubmodules` is `true`, `fetchFromSourcehut` uses `fetchgit`
+or `fetchhg` with `fetchSubmodules` or `fetchSubrepos` set to `true`,
+respectively. Otherwise, the fetcher uses `fetchzip`.
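+
+A sketch with placeholder values; note the tilde in front of the owner:
+
+```nix
+fetchFromSourcehut {
+  owner = "~example-owner";
+  repo = "example-repo";
+  rev = "v1.0";
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```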
+
+## `requireFile` {#requirefile}
+
+`requireFile` allows requesting files that cannot be fetched automatically, but whose content is known.
+This is a useful last-resort workaround for license restrictions that prohibit redistribution, or for downloads that are only accessible after authenticating interactively in a browser.
+If the requested file is present in the Nix store, the resulting derivation will not be built, because its expected output is already available.
+Otherwise, the builder will run, but fail with a message explaining to the user how to provide the file. The following code, for example:
+
+```nix
+requireFile {
+  name = "jdk-${version}_linux-x64_bin.tar.gz";
+  url = "https://www.oracle.com/java/technologies/javase-jdk11-downloads.html";
+  hash = "sha256-lL00+F7jjT71nlKJ7HRQuUQ7kkxVYlZh//5msD8sjeI=";
+}
+```
+results in this error message:
+```
+***
+Unfortunately, we cannot download file jdk-11.0.10_linux-x64_bin.tar.gz automatically.
+Please go to https://www.oracle.com/java/technologies/javase-jdk11-downloads.html to download it yourself, and add it to the Nix store
+using either
+  nix-store --add-fixed sha256 jdk-11.0.10_linux-x64_bin.tar.gz
+or
+  nix-prefetch-url --type sha256 file:///path/to/jdk-11.0.10_linux-x64_bin.tar.gz
+
+***
+```
+
+This function should only be used by non-redistributable software with an unfree license that we need to require the user to download manually.
+It produces packages that cannot be built automatically.
+
+## `fetchtorrent` {#fetchtorrent}
+
+`fetchtorrent` expects two arguments: `url`, which can either be a Magnet URI (magnet link) such as `magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c` or an HTTP URL pointing to a `.torrent` file, and a hash (`sha256` in the example below). It can also take a `config` argument, which will craft a `settings.json` configuration file and give it to `transmission`, the underlying program performing the fetch. The available config options for `transmission` can be found [here](https://github.com/transmission/transmission/blob/main/docs/Editing-Configuration-Files.md#options).
+
+```nix
+{ fetchtorrent }:
+
+fetchtorrent {
+  config = { peer-limit-global = 100; };
+  url = "magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c";
+  sha256 = "";
+}
+```
+
+### Parameters {#fetchtorrent-parameters}
+
+- `url`: Magnet URI (Magnet Link) such as `magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c` or an HTTP URL pointing to a `.torrent` file.
+
+- `backend`: Which BitTorrent program to use. Default: `"transmission"`. Valid values are `"rqbit"` or `"transmission"`. These are the two most suitable torrent clients for fetching in a fixed-output derivation at the time of writing, as they can be easily exited after usage. `rqbit` is written in Rust and has a smaller closure size than `transmission`. The performance and peer discovery properties differ between these clients, so some experimentation is needed to decide which one is best.
+
+- `config`: When using `transmission` as the `backend`, a JSON configuration can
+  be supplied to `transmission`. Refer to the [upstream documentation](https://github.com/transmission/transmission/blob/main/docs/Editing-Configuration-Files.md) for information on how to configure it.
+
diff --git a/nixpkgs/doc/build-helpers/images.md b/nixpkgs/doc/build-helpers/images.md
new file mode 100644
index 000000000000..033891fcef48
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images.md
@@ -0,0 +1,12 @@
+# Images {#chap-images}
+
+This chapter describes tools for creating various types of images.
+
+```{=include=} sections
+images/appimagetools.section.md
+images/dockertools.section.md
+images/ocitools.section.md
+images/portableservice.section.md
+images/makediskimage.section.md
+images/binarycache.section.md
+```
diff --git a/nixpkgs/doc/build-helpers/images/appimagetools.section.md b/nixpkgs/doc/build-helpers/images/appimagetools.section.md
new file mode 100644
index 000000000000..4d00e49c397d
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/appimagetools.section.md
@@ -0,0 +1,167 @@
+# pkgs.appimageTools {#sec-pkgs-appimageTools}
+
+`pkgs.appimageTools` is a set of functions for extracting and wrapping [AppImage](https://appimage.org/) files.
+They are meant to be used if traditional packaging from source is infeasible, or if it would take too long.
+To quickly run an AppImage file, `pkgs.appimage-run` can be used as well.
+
+::: {.warning}
+The `appimageTools` API is unstable and may be subject to backwards-incompatible changes in the future.
+:::
+
+## Wrapping {#ssec-pkgs-appimageTools-wrapping}
+
+Use `wrapType2` to wrap any AppImage.
+This will create an FHS environment with many packages [expected to exist](https://github.com/AppImage/pkg2appimage/blob/master/excludelist) for the AppImage to work.
+`wrapType2` expects an argument with the `src` attribute, and either a `name` attribute or `pname` and `version` attributes.
+
+It will eventually call into [`buildFHSEnv`](#sec-fhs-environments), and any extra attributes in the argument to `wrapType2` will be passed through to it.
+This means that you can pass the `extraInstallCommands` attribute, for example, and it will have the same effect as described in [`buildFHSEnv`](#sec-fhs-environments).
+
+::: {.note}
+In the past, `appimageTools` provided both `wrapType1` and `wrapType2`, to be used depending on the type of AppImage that was being wrapped.
+However, [those were unified in early 2020](https://github.com/NixOS/nixpkgs/pull/81833), meaning that both `wrapType1` and `wrapType2` have the same behaviour now.
+:::
+
+:::{.example #ex-wrapping-appimage-from-github}
+
+# Wrapping an AppImage from GitHub
+
+```nix
+{ appimageTools, fetchurl }:
+let
+  pname = "nuclear";
+  version = "0.6.30";
+
+  src = fetchurl {
+    url = "https://github.com/nukeop/nuclear/releases/download/v${version}/${pname}-v${version}.AppImage";
+    hash = "sha256-he1uGC1M/nFcKpMM9JKY4oeexJcnzV0ZRxhTjtJz6xw=";
+  };
+in
+appimageTools.wrapType2 {
+  inherit pname version src;
+}
+```
+
+:::
+
+The argument passed to `wrapType2` can also contain an `extraPkgs` attribute, which allows you to include additional packages inside the FHS environment your AppImage is going to run in.
+`extraPkgs` must be a function that returns a list of packages.
+There are a few ways to learn which dependencies an application needs:
+
+  - Looking through the extracted AppImage files, reading its scripts and running `patchelf` and `ldd` on its executables.
+    This can also be done in `appimage-run`, by setting `APPIMAGE_DEBUG_EXEC=bash`.
+  - Running `strace -vfefile` on the wrapped executable, looking for libraries that can't be found.
+
+:::{.example #ex-wrapping-appimage-with-extrapkgs}
+
+# Wrapping an AppImage with extra packages
+
+```nix
+{ appimageTools, fetchurl }:
+let
+  pname = "irccloud";
+  version = "0.16.0";
+
+  src = fetchurl {
+    url = "https://github.com/irccloud/irccloud-desktop/releases/download/v${version}/IRCCloud-${version}-linux-x86_64.AppImage";
+    sha256 = "sha256-/hMPvYdnVB1XjKgU2v47HnVvW4+uC3rhRjbucqin4iI=";
+  };
+in appimageTools.wrapType2 {
+  inherit pname version src;
+  extraPkgs = pkgs: [ pkgs.at-spi2-core ];
+}
+```
+
+:::
+
+## Extracting {#ssec-pkgs-appimageTools-extracting}
+
+Use `extract` if you need to extract the contents of an AppImage.
+This is usually used in Nixpkgs to install extra files in addition to [wrapping](#ssec-pkgs-appimageTools-wrapping) the AppImage.
+`extract` expects an argument with the `src` attribute, and either a `name` attribute or `pname` and `version` attributes.
+
+::: {.note}
+In the past, `appimageTools` provided both `extractType1` and `extractType2`, to be used depending on the type of AppImage that was being extracted.
+However, [those were unified in early 2020](https://github.com/NixOS/nixpkgs/pull/81572), meaning that both `extractType1` and `extractType2` have the same behaviour as `extract` now.
+:::
+
+:::{.example #ex-extracting-appimage}
+
+# Extracting an AppImage to install extra files
+
+This example was adapted from a real package in Nixpkgs to show how `extract` is usually used in combination with `wrapType2`.
+Note how `appimageContents` is used in `extraInstallCommands` to install additional files that were extracted from the AppImage.
+
+```nix
+{ appimageTools, fetchurl }:
+let
+  pname = "irccloud";
+  version = "0.16.0";
+
+  src = fetchurl {
+    url = "https://github.com/irccloud/irccloud-desktop/releases/download/v${version}/IRCCloud-${version}-linux-x86_64.AppImage";
+    sha256 = "sha256-/hMPvYdnVB1XjKgU2v47HnVvW4+uC3rhRjbucqin4iI=";
+  };
+
+  appimageContents = appimageTools.extract {
+    inherit pname version src;
+  };
+in appimageTools.wrapType2 {
+  inherit pname version src;
+
+  extraPkgs = pkgs: [ pkgs.at-spi2-core ];
+
+  extraInstallCommands = ''
+    mv $out/bin/${pname}-${version} $out/bin/${pname}
+    install -m 444 -D ${appimageContents}/irccloud.desktop $out/share/applications/irccloud.desktop
+    install -m 444 -D ${appimageContents}/usr/share/icons/hicolor/512x512/apps/irccloud.png \
+      $out/share/icons/hicolor/512x512/apps/irccloud.png
+    substituteInPlace $out/share/applications/irccloud.desktop \
+      --replace 'Exec=AppRun' 'Exec=${pname}'
+  '';
+}
+```
+
+:::
+
+The argument passed to `extract` can also contain a `postExtract` attribute, which allows you to execute additional commands after the files are extracted from the AppImage.
+`postExtract` must be a string with commands to run.
+
+:::{.example #ex-extracting-appimage-with-postextract}
+
+# Extracting an AppImage to install extra files, using `postExtract`
+
+This is a rewrite of [](#ex-extracting-appimage) to use `postExtract`.
+
+```nix
+{ appimageTools, fetchurl }:
+let
+  pname = "irccloud";
+  version = "0.16.0";
+
+  src = fetchurl {
+    url = "https://github.com/irccloud/irccloud-desktop/releases/download/v${version}/IRCCloud-${version}-linux-x86_64.AppImage";
+    sha256 = "sha256-/hMPvYdnVB1XjKgU2v47HnVvW4+uC3rhRjbucqin4iI=";
+  };
+
+  appimageContents = appimageTools.extract {
+    inherit pname version src;
+    postExtract = ''
+      substituteInPlace $out/irccloud.desktop --replace 'Exec=AppRun' 'Exec=${pname}'
+    '';
+  };
+in appimageTools.wrapType2 {
+  inherit pname version src;
+
+  extraPkgs = pkgs: [ pkgs.at-spi2-core ];
+
+  extraInstallCommands = ''
+    mv $out/bin/${pname}-${version} $out/bin/${pname}
+    install -m 444 -D ${appimageContents}/irccloud.desktop $out/share/applications/irccloud.desktop
+    install -m 444 -D ${appimageContents}/usr/share/icons/hicolor/512x512/apps/irccloud.png \
+      $out/share/icons/hicolor/512x512/apps/irccloud.png
+  '';
+}
+```
+
+:::
diff --git a/nixpkgs/doc/build-helpers/images/binarycache.section.md b/nixpkgs/doc/build-helpers/images/binarycache.section.md
new file mode 100644
index 000000000000..9946603c958e
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/binarycache.section.md
@@ -0,0 +1,58 @@
+# pkgs.mkBinaryCache {#sec-pkgs-binary-cache}
+
+`pkgs.mkBinaryCache` is a function for creating Nix flat-file binary caches.
+Such a cache exists as a directory on disk, and can be used as a Nix substituter by passing `--substituter file:///path/to/cache` to Nix commands.
+
+Nix packages are most commonly shared between machines using [HTTP, SSH, or S3](https://nixos.org/manual/nix/stable/package-management/sharing-packages.html), but a flat-file binary cache can still be useful in some situations.
+For example, you can copy it directly to another machine, or make it available on a network file system.
+It can also be a convenient way to make some Nix packages available inside a container via bind-mounting.
+
+`mkBinaryCache` expects an argument with the `rootPaths` attribute.
+`rootPaths` must be a list of derivations.
+The transitive closure of these derivations' outputs will be copied into the cache.
+
+::: {.note}
+This function is meant for advanced use cases.
+The more idiomatic way to work with flat-file binary caches is via the [nix-copy-closure](https://nixos.org/manual/nix/stable/command-ref/nix-copy-closure.html) command.
+You may also want to consider [dockerTools](#sec-pkgs-dockerTools) for your containerization needs.
+:::
+
+[]{#sec-pkgs-binary-cache-example}
+:::{.example #ex-mkbinarycache-copying-package-closure}
+
+# Copying a package and its closure to another machine with `mkBinaryCache`
+
+The following derivation will construct a flat-file binary cache containing the closure of `hello`.
+
+```nix
+{ mkBinaryCache, hello }:
+mkBinaryCache {
+  rootPaths = [hello];
+}
+```
+
+Build the cache on a machine.
+Note that the command still builds the exact nix package above, but adds some boilerplate to build it directly from an expression.
+
+```shellSession
+$ nix-build -E 'let pkgs = import <nixpkgs> {}; in pkgs.callPackage ({ mkBinaryCache, hello }: mkBinaryCache { rootPaths = [hello]; }) {}'
+/nix/store/azf7xay5xxdnia4h9fyjiv59wsjdxl0g-binary-cache
+```
+
+Copy the resulting directory to another machine, which we'll call `host2`:
+
+```shellSession
+$ scp result host2:/tmp/hello-cache
+```
+
+At this point, the cache can be used as a substituter when building derivations on `host2`:
+
+```shellSession
+$ nix-build -A hello '<nixpkgs>' \
+  --option require-sigs false \
+  --option trusted-substituters file:///tmp/hello-cache \
+  --option substituters file:///tmp/hello-cache
+/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
+```
+
+:::
diff --git a/nixpkgs/doc/build-helpers/images/dockertools.section.md b/nixpkgs/doc/build-helpers/images/dockertools.section.md
new file mode 100644
index 000000000000..527e623e7898
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/dockertools.section.md
@@ -0,0 +1,1587 @@
+# pkgs.dockerTools {#sec-pkgs-dockerTools}
+
+`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
+Docker itself is not used to perform any of the operations done by these functions.
+
+## buildImage {#ssec-pkgs-dockerTools-buildImage}
+
+This function builds a Docker-compatible repository tarball containing a single image.
+As such, the result is suitable for being loaded in Docker with `docker image load` (see [](#ex-dockerTools-buildImage) for how to do this).
+
+This function will create a single layer for all files (and dependencies) that are specified in its argument.
+Only new dependencies that are not already in the existing layers will be copied.
+If you prefer to create multiple layers for the files and dependencies you want to add to the image, see [](#ssec-pkgs-dockerTools-buildLayeredImage) or [](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
+
+This function allows a script to be run during the layer generation process, allowing custom behaviour to affect the final results of the image (see the documentation of the `runAsRoot` and `extraCommands` attributes).
+
+The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
+By default, that image will use a static creation date (see documentation for the `created` attribute).
+This allows `buildImage` to produce reproducible images.
+
+:::{.tip}
+When running an image built with `buildImage`, you might encounter certain errors depending on what you included in the image, especially if you did not start with any base image.
+
+If you encounter errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)`, you may need to add the contents of `pkgs.iana-etc` in the `copyToRoot` attribute.
+Similarly, if you encounter errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)`, you may need to add the contents of `pkgs.cacert` in the `copyToRoot` attribute.
+:::
+
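+As a rough illustration of that advice, the following is a minimal sketch that adds both packages to `copyToRoot` alongside the main package (the `hello-with-etc` name is only illustrative):
+
+```nix
+{ dockerTools, buildEnv, hello, iana-etc, cacert }:
+dockerTools.buildImage {
+  name = "hello-with-etc";
+  tag = "latest";
+
+  # hello provides /bin, while iana-etc and cacert provide the /etc files
+  # mentioned in the tip above (protocol database and CA certificates).
+  copyToRoot = buildEnv {
+    name = "image-root";
+    paths = [ hello iana-etc cacert ];
+    pathsToLink = [ "/bin" "/etc" ];
+  };
+
+  config.Cmd = [ "/bin/hello" ];
+}
+```
+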
+### Inputs {#ssec-pkgs-dockerTools-buildImage-inputs}
+
+`buildImage` expects an argument with the following attributes:
+
+`name` (String)
+
+: The name of the generated image.
+
+`tag` (String or Null; _optional_)
+
+: Tag of the generated image.
+  If `null`, the hash of the nix derivation will be used as the tag.
+
+  _Default value:_ `null`.
+
+`fromImage` (Path or Null; _optional_)
+
+: The repository tarball of an image to be used as the base for the generated image.
+  It must be a valid Docker image, such as one exported by `docker image save`, or another image built with the `dockerTools` utility functions.
+  This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
+  A value of `null` can be seen as an equivalent of `FROM scratch`.
+
+  If specified, the layer created by `buildImage` will be appended to the layers defined in the base image, resulting in an image with at least two layers (one or more layers from the base image, and the layer created by `buildImage`).
+  Otherwise, the resulting image will contain only the single layer created by `buildImage`.
+  A sketch that combines `fromImage` with [`pullImage`](#ssec-pkgs-dockerTools-pullImage) is shown after this list of inputs.
+
+  _Default value:_ `null`.
+
+`fromImageName` (String or Null; _optional_)
+
+: Used to specify the image within the repository tarball in case it contains multiple images.
+  A value of `null` means that `buildImage` will use the first image available in the repository.
+
+  :::{.note}
+  This must be used with `fromImageTag`. Using only `fromImageName` without `fromImageTag` will make `buildImage` use the first image available in the repository.
+  :::
+
+  _Default value:_ `null`.
+
+`fromImageTag` (String or Null; _optional_)
+
+: Used to specify the image within the repository tarball in case it contains multiple images.
+  A value of `null` means that `buildImage` will use the first image available in the repository.
+
+  :::{.note}
+  This must be used with `fromImageName`. Using only `fromImageTag` without `fromImageName` will make `buildImage` use the first image available in the repository.
+  :::
+
+  _Default value:_ `null`.
+
+`copyToRoot` (Path, List of Paths, or Null; _optional_)
+
+: Files to add to the generated image.
+  Anything that coerces to a path (e.g. a derivation) can also be used.
+  This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
+
+  _Default value:_ `null`.
+
+`keepContentsDirlinks` (Boolean; _optional_)
+
+: When adding files to the generated image (as specified by `copyToRoot`), this attribute controls whether to preserve symlinks to directories.
+  If `false`, the symlinks will be transformed into directories.
+  This behaves the same as `rsync -k` when `keepContentsDirlinks` is `false`, and the same as `rsync -K` when `keepContentsDirlinks` is `true`.
+
+  _Default value:_ `false`.
+
+`runAsRoot` (String or Null; _optional_)
+
+: A bash script that will run as root inside a VM that contains the existing layers of the base image and the new generated layer (including the files from `copyToRoot`).
+  The script will be run with a working directory of `/`.
+  This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
+  A value of `null` means that this step in the image generation process will be skipped.
+
+  See [](#ex-dockerTools-buildImage-runAsRoot) for how to work with this attribute.
+
+  :::{.caution}
+  Using this attribute requires the `kvm` device to be available, see [`system-features`](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-system-features).
+  If the `kvm` device isn't available, you should consider using [`buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage) or [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
+  Those functions allow scripts to be run as root without access to the `kvm` device.
+  :::
+
+  :::{.note}
+  At the time the script in `runAsRoot` is run, the files specified directly in `copyToRoot` will be present in the VM, but their dependencies might not be there yet.
+  Copying their dependencies into the generated image is a step that happens after `runAsRoot` finishes running.
+  :::
+
+  _Default value:_ `null`.
+
+`extraCommands` (String; _optional_)
+
+: A bash script that will run before the layer created by `buildImage` is finalised.
+  The script will be run on some (opaque) working directory which will become `/` once the layer is created.
+  This is similar to `runAsRoot`, but the script specified in `extraCommands` is **not** run as root, and does not involve creating a VM.
+  It is simply run as part of building the derivation that outputs the layer created by `buildImage`.
+
+  See [](#ex-dockerTools-buildImage-extraCommands) for how to work with this attribute, and subtle differences compared to `runAsRoot`.
+
+  _Default value:_ `""`.
+
+`config` (Attribute Set or Null; _optional_)
+
+: Used to specify the configuration of the containers that will be started off the generated image.
+  Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
+
+  _Default value:_ `null`.
+
+`architecture` (String; _optional_)
+
+: Used to specify the image architecture.
+  This is useful for multi-architecture builds that don't need cross compiling.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
+  According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
+
+  _Default value:_ the same value from `pkgs.go.GOARCH`.
+
+`diskSize` (Number; _optional_)
+
+: Controls the disk size (in megabytes) of the VM used to run the script specified in `runAsRoot`.
+  This attribute is ignored if `runAsRoot` is `null`.
+
+  _Default value:_ 1024.
+
+`buildVMMemorySize` (Number; _optional_)
+
+: Controls the amount of memory (in megabytes) provisioned for the VM used to run the script specified in `runAsRoot`.
+  This attribute is ignored if `runAsRoot` is `null`.
+
+  _Default value:_ 512.
+
+`created` (String; _optional_)
+
+: Specifies the time of creation of the generated image.
+  This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case `buildImage` will use the current date.
+
+  See [](#ex-dockerTools-buildImage-creatednow) for how to use `"now"`.
+
+  :::{.caution}
+  Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
+  :::
+
+  _Default value:_ `"1970-01-01T00:00:01Z"`.
+
+`uid` (Number; _optional_)
+
+: The uid of the user that will own the files packed in the new layer built by `buildImage`.
+
+  _Default value:_ 0.
+
+`gid` (Number; _optional_)
+
+: The gid of the group that will own the files packed in the new layer built by `buildImage`.
+
+  _Default value:_ 0.
+
+`compressor` (String; _optional_)
+
+: Selects the algorithm used to compress the image.
+
+  _Default value:_ `"gz"`.\
+  _Possible values:_ `"none"`, `"gz"`, `"zstd"`.
+
+`contents` **DEPRECATED**
+
+: This attribute is deprecated, and users are encouraged to use `copyToRoot` instead.
+
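+As referenced in the description of `fromImage`, the following is a minimal sketch of using an image pulled with [`pullImage`](#ssec-pkgs-dockerTools-pullImage) as the base image. The digest and hash values are the ones used in [](#ex-dockerTools-pullImage-niximage); the `hello-on-nix` name is only illustrative.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildImage {
+  name = "hello-on-nix";
+  tag = "latest";
+
+  # Use a pulled nixos/nix image as the base image; the new layer created
+  # by buildImage is appended to the layers of this base image.
+  fromImage = dockerTools.pullImage {
+    imageName = "nixos/nix";
+    imageDigest = "sha256:b8ea88f763f33dfda2317b55eeda3b1a4006692ee29e60ee54ccf6d07348c598";
+    finalImageName = "nix";
+    finalImageTag = "2.19.3";
+    sha256 = "zRwlQs1FiKrvHPaf8vWOR/Tlp1C5eLn1d9pE4BZg3oA=";
+  };
+
+  copyToRoot = [ hello ];
+
+  config.Cmd = [ "/bin/hello" ];
+}
+```
+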
+### Passthru outputs {#ssec-pkgs-dockerTools-buildImage-passthru-outputs}
+
+`buildImage` defines a few [`passthru`](#var-stdenv-passthru) attributes:
+
+`buildArgs` (Attribute Set)
+
+: The argument passed to `buildImage` itself.
+  This allows you to inspect all attributes specified in the argument, as described above.
+
+`layer` (Attribute Set)
+
+: The derivation with the layer created by `buildImage`.
+  This allows easier inspection of the contents added by `buildImage` in the generated image.
+
+`imageTag` (String)
+
+: The tag of the generated image.
+  This is useful if no tag was specified in the attributes of the argument to `buildImage`, because an automatic tag will be used instead.
+  `imageTag` allows you to retrieve the value of the tag used in this case.
+
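+Because these attributes are plain Nix values, they can be referenced from other Nix code. The following is a minimal sketch (the `load-redis` name is only illustrative) that uses `writeShellScript` from Nixpkgs to load an image and refer to its automatically generated tag through `imageTag`:
+
+```nix
+{ dockerTools, writeShellScript, buildEnv, redis }:
+let
+  image = dockerTools.buildImage {
+    name = "redis";
+    copyToRoot = buildEnv {
+      name = "image-root";
+      paths = [ redis ];
+      pathsToLink = [ "/bin" ];
+    };
+    config.Cmd = [ "/bin/redis-server" ];
+  };
+in
+# No tag was specified above, so image.imageTag resolves to the
+# automatically generated tag of the image.
+writeShellScript "load-redis" ''
+  docker image load -i ${image}
+  docker image inspect redis:${image.imageTag}
+''
+```
+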
+### Examples {#ssec-pkgs-dockerTools-buildImage-examples}
+
+:::{.example #ex-dockerTools-buildImage}
+# Building a Docker image
+
+The following package builds a Docker image that runs the `redis-server` executable from the `redis` package.
+The Docker image will have name `redis` and tag `latest`.
+
+```nix
+{ dockerTools, buildEnv, redis }:
+dockerTools.buildImage {
+  name = "redis";
+  tag = "latest";
+
+  copyToRoot = buildEnv {
+    name = "image-root";
+    paths = [ redis ];
+    pathsToLink = [ "/bin" ];
+  };
+
+  runAsRoot = ''
+    mkdir -p /data
+  '';
+
+  config = {
+    Cmd = [ "/bin/redis-server" ];
+    WorkingDir = "/data";
+    Volumes = { "/data" = { }; };
+  };
+}
+```
+
+The result of building this package is a `.tar.gz` file that can be loaded into Docker:
+
+```shell
+$ nix-build
+(some output removed for clarity)
+building '/nix/store/yw0adm4wpsw1w6j4fb5hy25b3arr9s1v-docker-image-redis.tar.gz.drv'...
+Adding layer...
+tar: Removing leading `/' from member names
+Adding meta...
+Cooking the image...
+Finished.
+/nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz
+
+$ docker image load -i /nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz
+(some output removed for clarity)
+Loaded image: redis:latest
+```
+:::
+
+:::{.example #ex-dockerTools-buildImage-runAsRoot}
+# Building a Docker image with `runAsRoot`
+
+The following package builds a Docker image with the `hello` executable from the `hello` package.
+It uses `runAsRoot` to create a directory and a file inside the image.
+
+This works the same as [](#ex-dockerTools-buildImage-extraCommands), but uses `runAsRoot` instead of `extraCommands`.
+
+```nix
+{ dockerTools, buildEnv, hello }:
+dockerTools.buildImage {
+  name = "hello";
+  tag = "latest";
+
+  copyToRoot = buildEnv {
+    name = "image-root";
+    paths = [ hello ];
+    pathsToLink = [ "/bin" ];
+  };
+
+  runAsRoot = ''
+    mkdir -p /data
+    echo "some content" > my-file
+  '';
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+    WorkingDir = "/data";
+  };
+}
+```
+:::
+
+:::{.example #ex-dockerTools-buildImage-extraCommands}
+# Building a Docker image with `extraCommands`
+
+The following package builds a Docker image with the `hello` executable from the `hello` package.
+It uses `extraCommands` to create a directory and a file inside the image.
+
+This works the same as [](#ex-dockerTools-buildImage-runAsRoot), but uses `extraCommands` instead of `runAsRoot`.
+Note that with `extraCommands`, we can't reference `/` directly, and must create files and directories as if we were already in `/`.
+
+```nix
+{ dockerTools, buildEnv, hello }:
+dockerTools.buildImage {
+  name = "hello";
+  tag = "latest";
+
+  copyToRoot = buildEnv {
+    name = "image-root";
+    paths = [ hello ];
+    pathsToLink = [ "/bin" ];
+  };
+
+  extraCommands = ''
+    mkdir -p data
+    echo "some content" > my-file
+  '';
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+    WorkingDir = "/data";
+  };
+}
+```
+:::
+
+:::{.example #ex-dockerTools-buildImage-creatednow}
+# Building a Docker image with a creation date set to the current time
+
+Note that using a value of `"now"` in the `created` attribute will break reproducibility.
+
+```nix
+{ dockerTools, buildEnv, hello }:
+dockerTools.buildImage {
+  name = "hello";
+  tag = "latest";
+
+  created = "now";
+
+  copyToRoot = buildEnv {
+    name = "image-root";
+    paths = [ hello ];
+    pathsToLink = [ "/bin" ];
+  };
+
+  config.Cmd = [ "/bin/hello" ];
+}
+```
+
+After importing the generated repository tarball with Docker, its CLI will display a reasonable date and sort the images as expected:
+
+```shell
+$ docker image ls
+REPOSITORY   TAG      IMAGE ID       CREATED              SIZE
+hello        latest   de2bf4786de6   About a minute ago   25.2MB
+```
+:::
+
+## buildLayeredImage {#ssec-pkgs-dockerTools-buildLayeredImage}
+
+`buildLayeredImage` uses [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) underneath to build a compressed Docker-compatible repository tarball.
+Basically, `buildLayeredImage` runs the script created by `streamLayeredImage` to save the compressed image in the Nix store.
+`buildLayeredImage` supports the same options as `streamLayeredImage`, see [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) for details.
+
+:::{.note}
+Despite the similar name, [`buildImage`](#ssec-pkgs-dockerTools-buildImage) works completely differently from `buildLayeredImage` and `streamLayeredImage`.
+
+Even though some of the arguments may seem related, they cannot be interchanged.
+:::
+
+You can load the result of this function in Docker with `docker image load`.
+See [](#ex-dockerTools-buildLayeredImage-hello) for an example of how to do this.
+
+### Examples {#ssec-pkgs-dockerTools-buildLayeredImage-examples}
+
+:::{.example #ex-dockerTools-buildLayeredImage-hello}
+# Building a layered Docker image
+
+The following package builds a layered Docker image that runs the `hello` executable from the `hello` package.
+The Docker image will have name `hello` and tag `latest`.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildLayeredImage {
+  name = "hello";
+  tag = "latest";
+
+  contents = [ hello ];
+
+  config.Cmd = [ "/bin/hello" ];
+}
+```
+
+The result of building this package is a `.tar.gz` file that can be loaded into Docker:
+
+```shell
+$ nix-build
+(some output removed for clarity)
+building '/nix/store/bk8bnrbw10nq7p8pvcmdr0qf57y6scha-hello.tar.gz.drv'...
+No 'fromImage' provided
+Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
+Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
+Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
+Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
+Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
+Creating layer 6 with customisation...
+Adding manifests...
+Done.
+/nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz
+
+$ docker image load -i /nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz
+(some output removed for clarity)
+Loaded image: hello:latest
+```
+:::
+
+## streamLayeredImage {#ssec-pkgs-dockerTools-streamLayeredImage}
+
+`streamLayeredImage` builds a **script** which, when run, will stream to stdout a Docker-compatible repository tarball containing a single image, using multiple layers to improve sharing between images.
+This means that `streamLayeredImage` does not output an image into the Nix store, but only a script that builds the image, saving on IO and disk/cache space, particularly with large images.
+
+You can load the result of this function in Docker with `docker image load`.
+See [](#ex-dockerTools-streamLayeredImage-hello) for an example of how to do this.
+
+For this function, you specify a [store path](https://nixos.org/manual/nix/stable/store/store-path) or a list of store paths to be added to the image, and the function will automatically include any dependencies of those paths in the image.
+The function will attempt to create one layer per object in the Nix store that needs to be added to the image.
+In case there are more objects to include than available layers, the function will put the most ["popular"](https://github.com/NixOS/nixpkgs/tree/release-23.11/pkgs/build-support/references-by-popularity) objects in their own layers, and group all remaining objects into a single layer.
+
+An additional layer will be created with symlinks to the store paths you specified to be included in the image.
+These symlinks are built with [`symlinkJoin`](#trivial-builder-symlinkJoin), so they will be included in the root of the image.
+See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand how these symlinks are laid out in the generated image.
+
+`streamLayeredImage` allows scripts to be run when creating the additional layer with symlinks, allowing custom behaviour to affect the final results of the image (see the documentation of the `extraCommands` and `fakeRootCommands` attributes).
+
+The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
+By default, that image will use a static creation date (see documentation for the `created` attribute).
+This allows the function to produce reproducible images.
+
+### Inputs {#ssec-pkgs-dockerTools-streamLayeredImage-inputs}
+
+`streamLayeredImage` expects one argument with the following attributes:
+
+`name` (String)
+
+: The name of the generated image.
+
+`tag` (String or Null; _optional_)
+
+: Tag of the generated image.
+  If `null`, the hash of the nix derivation will be used as the tag.
+
+  _Default value:_ `null`.
+
+`fromImage` (Path or Null; _optional_)
+
+: The repository tarball of an image to be used as the base for the generated image.
+  It must be a valid Docker image, such as one exported by `docker image save`, or another image built with the `dockerTools` utility functions.
+  This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
+  A value of `null` can be seen as an equivalent of `FROM scratch`.
+
+  If specified, the created layers will be appended to the layers defined in the base image.
+
+  _Default value:_ `null`.
+
+`contents` (Path or List of Paths; _optional_) []{#dockerTools-buildLayeredImage-arg-contents}
+
+: Directories whose contents will be added to the generated image.
+  Things that coerce to paths (e.g. a derivation) can also be used.
+  This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
+
+  All the contents specified by `contents` will be added as a final layer in the generated image.
+  They will be added as links to the actual files (e.g. links to the store paths).
+  The actual files will be added in previous layers.
+
+  _Default value:_ `[]`.
+
+`config` (Attribute Set or Null; _optional_) []{#dockerTools-buildLayeredImage-arg-config}
+
+: Used to specify the configuration of the containers that will be started off the generated image.
+  Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
+
+  If any packages are used directly in `config`, they will be automatically included in the generated image.
+  See [](#ex-dockerTools-streamLayeredImage-configclosure) for an example.
+
+  _Default value:_ `null`.
+
+`architecture` (String; _optional_)
+
+: Used to specify the image architecture.
+  This is useful for multi-architecture builds that don't need cross compiling.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
+  According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
+
+  _Default value:_ the same value from `pkgs.go.GOARCH`.
+
+`created` (String; _optional_)
+
+: Specifies the time of creation of the generated image.
+  This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case the current date will be used.
+
+  :::{.caution}
+  Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
+  :::
+
+  _Default value:_ `"1970-01-01T00:00:01Z"`.
+
+`uid` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-uid}
+`gid` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-gid}
+`uname` (String; _optional_) []{#dockerTools-buildLayeredImage-arg-uname}
+`gname` (String; _optional_) []{#dockerTools-buildLayeredImage-arg-gname}
+
+: Credentials for Nix store ownership.
+  Can be overridden to e.g. `1000` / `1000` / `"user"` / `"user"` to enable building a container where Nix can be used as an unprivileged user in single-user mode.
+  A sketch of such an override is shown after this list of inputs.
+
+  _Default value:_ `0` / `0` / `"root"` / `"root"`.
+
+`maxLayers` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-maxLayers}
+
+: The maximum number of layers that will be used by the generated image.
+  If a `fromImage` was specified, the number of layers used by `fromImage` will be subtracted from `maxLayers` to ensure that the image generated will have at most `maxLayers`.
+
+  :::{.caution}
+  Depending on the tool/runtime where the image will be used, there might be a limit to the number of layers that an image can have.
+  For Docker, see [this issue on GitHub](https://github.com/docker/docs/issues/8230).
+  :::
+
+  _Default value:_ 100.
+
+`extraCommands` (String; _optional_)
+
+: A bash script that will run in the context of the layer created with the contents specified by `contents`.
+  At the moment this script runs, only the contents directly specified by `contents` will be available as links.
+
+  _Default value:_ `""`.
+
+`fakeRootCommands` (String; _optional_)
+
+: A bash script that will run in the context of the layer created with the contents specified by `contents`.
+  During the process to generate that layer, the script in `extraCommands` will be run first, if specified.
+  After that, a {manpage}`fakeroot(1)` environment will be entered.
+  The script specified in `fakeRootCommands` runs inside the fakeroot environment, and the layer is then generated from the view of the files inside the fakeroot environment.
+
+  This is useful to change the owners of the files in the layer (by running `chown`, for example), or performing any other privileged operations related to file manipulation (by default, all files in the layer will be owned by root, and the build environment doesn't have enough privileges to directly perform privileged operations on these files).
+
+  For more details, see the manpage for {manpage}`fakeroot(1)`.
+
+  :::{.caution}
+  Due to how fakeroot works, static binaries cannot perform privileged file operations in `fakeRootCommands`, unless `enableFakechroot` is set to `true`.
+  :::
+
+  _Default value:_ `""`.
+
+`enableFakechroot` (Boolean; _optional_)
+
+: By default, the script specified in `fakeRootCommands` only runs inside a fakeroot environment.
+  If `enableFakechroot` is `true`, a more complete chroot environment will be created using [`proot`](https://proot-me.github.io/) before running the script in `fakeRootCommands`.
+  Files in the Nix store will be available.
+  This allows scripts that perform installation in `/` to work as expected.
+  This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
+
+  _Default value:_ `false`.
+
+`includeStorePaths` (Boolean; _optional_)
+
+: The files specified in `contents` are put into layers in the generated image.
+  If `includeStorePaths` is `false`, the actual files will not be included in the generated image, and only links to them will be added instead.
+  It is **not recommended** to set this to `false` unless you have other tooling to insert the store paths via other means (such as bind mounting the host store) when running containers with the generated image.
+  If you don't provide any extra tooling, the generated image won't run properly.
+
+  See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand the impact of setting `includeStorePaths` to `false`.
+
+  _Default value:_ `true`.
+
+`passthru` (Attribute Set; _optional_)
+
+: Use this to pass any attributes as [passthru](#var-stdenv-passthru) for the resulting derivation.
+
+  _Default value:_ `{}`.
+
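+The following is a rough sketch of the unprivileged-user override mentioned in the description of `uid`, `gid`, `uname`, and `gname`. The `nix-unprivileged` name is only illustrative, and a complete single-user setup will likely need additional files (for example, the user database provided by [`fakeNss`](#sssec-pkgs-dockerTools-helpers-fakeNss)).
+
+```nix
+{ dockerTools, nix }:
+dockerTools.streamLayeredImage {
+  name = "nix-unprivileged";
+  tag = "latest";
+
+  contents = [ nix ];
+
+  # Make the store paths copied into the image owned by an unprivileged
+  # user instead of root, so Nix can run in single-user mode.
+  uid = 1000;
+  gid = 1000;
+  uname = "user";
+  gname = "user";
+}
+```
+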
+### Passthru outputs {#ssec-pkgs-dockerTools-streamLayeredImage-passthru-outputs}
+
+`streamLayeredImage` also defines its own [`passthru`](#var-stdenv-passthru) attributes:
+
+`imageTag` (String)
+
+: The tag of the generated image.
+  This is useful if no tag was specified in the attributes of the argument to the function, because an automatic tag will be used instead.
+  `imageTag` allows you to retrieve the value of the tag used in this case.
+
+### Examples {#ssec-pkgs-dockerTools-streamLayeredImage-examples}
+
+:::{.example #ex-dockerTools-streamLayeredImage-hello}
+# Streaming a layered Docker image
+
+The following package builds a **script** which, when run, will stream a layered Docker image that runs the `hello` executable from the `hello` package.
+The Docker image will have name `hello` and tag `latest`.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamLayeredImage {
+  name = "hello";
+  tag = "latest";
+
+  contents = [ hello ];
+
+  config.Cmd = [ "/bin/hello" ];
+}
+```
+
+The result of building this package is a script.
+Running this script and piping it into `docker image load` gives you the same image that was built in [](#ex-dockerTools-buildLayeredImage-hello).
+Note that in this case, the image is never added to the Nix store, but instead streamed directly into Docker.
+
+```shell
+$ nix-build
+(output removed for clarity)
+/nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello
+
+$ /nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello | docker image load
+No 'fromImage' provided
+Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
+Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
+Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
+Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
+Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
+Creating layer 6 with customisation...
+Adding manifests...
+Done.
+(some output removed for clarity)
+Loaded image: hello:latest
+```
+:::
+
+:::{.example #ex-dockerTools-streamLayeredImage-exploringlayers}
+# Exploring the layers in an image built with `streamLayeredImage`
+
+Assume the following package, which builds a layered Docker image with the `hello` package.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamLayeredImage {
+  name = "hello";
+  contents = [ hello ];
+}
+```
+
+The `hello` package depends on 4 other packages:
+
+```shell
+$ nix-store --query -R $(nix-build -A hello)
+/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
+/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
+/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
+/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
+/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
+```
+
+This means that all these packages will be included in the image generated by `streamLayeredImage`.
+It will put each package in its own layer, for a total of 5 layers with actual files in them.
+A final layer will be created only with symlinks for the `hello` package.
+
+The image generated will have the following directory structure (some directories were collapsed for readability):
+
+```
+├── bin
+│   └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
+├── nix
+│   └── store
+│       ├─⊕ 9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
+│       ├─⊕ i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
+│       ├─⊕ ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
+│       ├─⊕ ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
+│       └─⊕ zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
+└── share
+    ├── info
+    │   └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
+    ├─⊕ locale
+    └── man
+        └── man1
+            └── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
+```
+
+Each of the packages in `/nix/store` comes from a layer in the image.
+The final layer adds the `/bin` and `/share` directories, but they only contain links to the actual files in `/nix/store`.
+
+If our package sets `includeStorePaths` to `false`, we'll end up with only the final layer with the links, but the actual files won't exist in the image:
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamLayeredImage {
+  name = "hello";
+  contents = [ hello ];
+  includeStorePaths = false;
+}
+```
+
+After building this package, the image will have the following directory structure:
+
+```
+├── bin
+│   └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
+└── share
+    ├── info
+    │   └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
+    ├─⊕ locale
+    └── man
+        └── man1
+            └── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
+```
+
+Note how the links point to paths in `/nix/store`, but they're not included in the image itself.
+This is why you need extra tooling when setting `includeStorePaths` to `false`:
+a container created from such an image won't otherwise find any of the files it needs to run.
+:::
+
+::: {.example #ex-dockerTools-streamLayeredImage-configclosure}
+# Building a layered Docker image with packages directly in `config`
+
+The closure of `config` is automatically included in the generated image.
+The following package shows a more compact way to create the same output generated in [](#ex-dockerTools-streamLayeredImage-hello).
+
+```nix
+{ dockerTools, hello, lib }:
+dockerTools.streamLayeredImage {
+  name = "hello";
+  tag = "latest";
+  config.Cmd = [ "${lib.getExe hello}" ];
+}
+```
+:::
+
+[]{#ssec-pkgs-dockerTools-fetchFromRegistry}
+## pullImage {#ssec-pkgs-dockerTools-pullImage}
+
+This function is similar to the `docker image pull` command, which means it can be used to pull a Docker image from a registry that implements the [Docker Registry HTTP API V2](https://distribution.github.io/distribution/spec/api/).
+By default, the `docker.io` registry is used.
+
+The image will be downloaded as an uncompressed Docker-compatible repository tarball, which is suitable for use with other `dockerTools` functions such as [`buildImage`](#ssec-pkgs-dockerTools-buildImage), [`buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage), and [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage).
+
+This function requires two different types of hashes/digests to be specified:
+
+- One of them is used to identify a unique image within the registry (see the documentation for the `imageDigest` attribute).
+- The other is used by Nix to ensure the contents of the output haven't changed (see the documentation for the `sha256` attribute).
+
+Both hashes are required because they must uniquely identify some content in two completely different systems (the Docker registry and the Nix store), but their values will not be the same.
+See [](#ex-dockerTools-pullImage-nixprefetchdocker) for a tool that can help gather these values.
+
+### Inputs {#ssec-pkgs-dockerTools-pullImage-inputs}
+
+`pullImage` expects a single argument with the following attributes:
+
+`imageName` (String)
+
+: Specifies the name of the image to be downloaded, as well as the registry endpoint.
+  By default, the `docker.io` registry is used.
+  To specify a different registry, prepend the endpoint to `imageName`, separated by a slash (`/`).
+  See [](#ex-dockerTools-pullImage-differentregistry) for how to do that.
+
+`imageDigest` (String)
+
+: Specifies the digest of the image to be downloaded.
+
+  :::{.tip}
+  **Why can't I specify a tag to pull from, and have to use a digest instead?**
+
+  Tags are often updated to point to different image contents.
+  The most common example is the `latest` tag, which is usually updated whenever a newer image version is available.
+
+  An image tag isn't enough to guarantee the contents of an image won't change, but a digest guarantees this.
+  Providing a digest helps ensure that you will still be able to build the same Nix code and get the same output even if newer versions of an image are released.
+  :::
+
+`sha256` (String)
+
+: The hash of the image after it is downloaded.
+  Internally, this is passed to the [`outputHash`](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-outputHash) attribute of the resulting derivation.
+  This is needed to provide a guarantee to Nix that the contents of the image haven't changed, because Nix can't use the value of `imageDigest` for that purpose.
+
+`finalImageName` (String; _optional_)
+
+: Specifies the name that will be used for the image after it has been downloaded.
+  This only applies after the image is downloaded, and is not used to identify the image to be downloaded in the registry.
+  Use `imageName` for that instead.
+
+  _Default value:_ the same value specified in `imageName`.
+
+`finalImageTag` (String; _optional_)
+
+: Specifies the tag that will be used for the image after it has been downloaded.
+  This only applies after the image is downloaded, and is not used to identify the image to be downloaded in the registry.
+
+  _Default value:_ `"latest"`.
+
+`os` (String; _optional_)
+
+: Specifies the operating system of the image to pull.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
+  According to the linked specification, all possible values for `$GOOS` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `darwin` or `linux`.
+
+  _Default value:_ `"linux"`.
+
+`arch` (String; _optional_)
+
+: Specifies the architecture of the image to pull.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
+  According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
+
+  _Default value:_ the same value from `pkgs.go.GOARCH`.
+
+`tlsVerify` (Boolean; _optional_)
+
+: Used to enable or disable HTTPS and TLS certificate verification when communicating with the chosen Docker registry.
+  Setting this to `false` will make `pullImage` connect to the registry through HTTP.
+
+  _Default value:_ `true`.
+
+`name` (String; _optional_)
+
+: The name used for the output in the Nix store path.
+
+  _Default value:_ a value derived from `finalImageName` and `finalImageTag`, with some symbols replaced.
+  It is recommended to treat the default as an opaque value.
+
+### Examples {#ssec-pkgs-dockerTools-pullImage-examples}
+
+::: {.example #ex-dockerTools-pullImage-niximage}
+# Pulling the nixos/nix Docker image from the default registry
+
+This example pulls the [`nixos/nix` image](https://hub.docker.com/r/nixos/nix) and saves it in the Nix store.
+
+```nix
+{ dockerTools }:
+dockerTools.pullImage {
+  imageName = "nixos/nix";
+  imageDigest = "sha256:b8ea88f763f33dfda2317b55eeda3b1a4006692ee29e60ee54ccf6d07348c598";
+  finalImageName = "nix";
+  finalImageTag = "2.19.3";
+  sha256 = "zRwlQs1FiKrvHPaf8vWOR/Tlp1C5eLn1d9pE4BZg3oA=";
+}
+```
+:::
+
+::: {.example #ex-dockerTools-pullImage-differentregistry}
+# Pulling the nixos/nix Docker image from a specific registry
+
+This example pulls the [`coreos/etcd` image](https://quay.io/repository/coreos/etcd) from the `quay.io` registry.
+
+```nix
+{ dockerTools }:
+dockerTools.pullImage {
+  imageName = "quay.io/coreos/etcd";
+  imageDigest = "sha256:24a23053f29266fb2731ebea27f915bb0fb2ae1ea87d42d890fe4e44f2e27c5d";
+  finalImageName = "etcd";
+  finalImageTag = "v3.5.11";
+  sha256 = "Myw+85f2/EVRyMB3axECdmQ5eh9p1q77FWYKy8YpRWU=";
+}
+```
+:::
+
+::: {.example #ex-dockerTools-pullImage-nixprefetchdocker}
+# Finding the digest and hash values to use for `dockerTools.pullImage`
+
+Since [`dockerTools.pullImage`](#ssec-pkgs-dockerTools-pullImage) requires two different hashes, one can run the `nix-prefetch-docker` tool to find out the values for the hashes.
+The tool outputs some text for an attribute set which you can pass directly to `pullImage`.
+
+```shell
+$ nix run nixpkgs#nix-prefetch-docker -- --image-name nixos/nix --image-tag 2.19.3 --arch amd64 --os linux
+(some output removed for clarity)
+Writing manifest to image destination
+-> ImageName: nixos/nix
+-> ImageDigest: sha256:498fa2d7f2b5cb3891a4edf20f3a8f8496e70865099ba72540494cd3e2942634
+-> FinalImageName: nixos/nix
+-> FinalImageTag: latest
+-> ImagePath: /nix/store/4mxy9mn6978zkvlc670g5703nijsqc95-docker-image-nixos-nix-latest.tar
+-> ImageHash: 1q6cf2pdrasa34zz0jw7pbs6lvv52rq2aibgxccbwcagwkg2qj1q
+{
+  imageName = "nixos/nix";
+  imageDigest = "sha256:498fa2d7f2b5cb3891a4edf20f3a8f8496e70865099ba72540494cd3e2942634";
+  sha256 = "1q6cf2pdrasa34zz0jw7pbs6lvv52rq2aibgxccbwcagwkg2qj1q";
+  finalImageName = "nixos/nix";
+  finalImageTag = "latest";
+}
+```
+
+It is important to supply the `--arch` and `--os` arguments to `nix-prefetch-docker` to filter to a single image, in case there are multiple architectures and/or operating systems supported by the image name and tags specified.
+By default, `nix-prefetch-docker` will set `os` to `linux` and `arch` to `amd64`.
+
+Run `nix-prefetch-docker --help` for a list of all supported arguments:
+```shell
+$ nix run nixpkgs#nix-prefetch-docker -- --help
+(output removed for clarity)
+```
+:::
+
+## exportImage {#ssec-pkgs-dockerTools-exportImage}
+
+This function is similar to the `docker container export` command, which means it can be used to export an image's filesystem as an uncompressed tarball archive.
+The difference is that `docker container export` is applied to containers, but `dockerTools.exportImage` applies to Docker images.
+The resulting archive will not contain any image metadata (such as the command to run with `docker container run`), only the filesystem contents.
+
+You can use this function to import an archive in Docker with `docker image import`.
+See [](#ex-dockerTools-exportImage-importingDocker) to understand how to do that.
+
+:::{.caution}
+`exportImage` works by unpacking the given image inside a VM.
+Because of this, using this function requires the `kvm` device to be available, see [`system-features`](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-system-features).
+:::
+
+### Inputs {#ssec-pkgs-dockerTools-exportImage-inputs}
+
+`exportImage` expects an argument with the following attributes:
+
+`fromImage` (Attribute Set or String)
+
+: The repository tarball of the image whose filesystem will be exported.
+  It must be a valid Docker image, such as one exported by `docker image save`, or another image built with the `dockerTools` utility functions.
+
+  If `name` is not specified, `fromImage` must be an Attribute Set corresponding to a derivation, i.e. it can't be a path to a tarball.
+  If `name` is specified, `fromImage` can be either an Attribute Set corresponding to a derivation or simply a path to a tarball.
+
+  See [](#ex-dockerTools-exportImage-naming) and [](#ex-dockerTools-exportImage-fromImagePath) to understand the connection between `fromImage`, `name`, and the name used for the output of `exportImage`.
+
+`fromImageName` (String or Null; _optional_)
+
+: Used to specify the image within the repository tarball in case it contains multiple images.
+  A value of `null` means that `exportImage` will use the first image available in the repository.
+
+  :::{.note}
+  This must be used with `fromImageTag`. Using only `fromImageName` without `fromImageTag` will make `exportImage` use the first image available in the repository.
+  :::
+
+  _Default value:_ `null`.
+
+`fromImageTag` (String or Null; _optional_)
+
+: Used to specify the image within the repository tarball in case it contains multiple images.
+  A value of `null` means that `exportImage` will use the first image available in the repository.
+
+  :::{.note}
+  This must be used with `fromImageName`. Using only `fromImageTag` without `fromImageName` will make `exportImage` use the first image available in the repository.
+  :::
+
+  _Default value:_ `null`.
+
+`diskSize` (Number; _optional_)
+
+: Controls the disk size (in megabytes) of the VM used to unpack the image.
+
+  _Default value:_ 1024.
+
+`name` (String; _optional_)
+
+: The name used for the output in the Nix store path.
+
+  _Default value:_ the value of `fromImage.name`.
+
+### Examples {#ssec-pkgs-dockerTools-exportImage-examples}
+
+:::{.example #ex-dockerTools-exportImage-hello}
+# Exporting a Docker image with `dockerTools.exportImage`
+
+This example first builds a layered image with [`dockerTools.buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage), and then exports its filesystem with `dockerTools.exportImage`.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.exportImage {
+  name = "hello";
+  fromImage = dockerTools.buildLayeredImage {
+    name = "hello";
+    contents = [ hello ];
+  };
+}
+```
+
+When building the package above, we can see the layers of the Docker image being unpacked to produce the final output:
+
+```shell
+$ nix-build
+(some output removed for clarity)
+Unpacking base image...
+From-image name or tag wasn't set. Reading the first ID.
+Unpacking layer 5731199219418f175d1580dbca05677e69144425b2d9ecb60f416cd57ca3ca42/layer.tar
+tar: Removing leading `/' from member names
+Unpacking layer e2897bf34bb78c4a65736510204282d9f7ca258ba048c183d665bd0f3d24c5ec/layer.tar
+tar: Removing leading `/' from member names
+Unpacking layer 420aa5876dca4128cd5256da7dea0948e30ef5971712f82601718cdb0a6b4cda/layer.tar
+tar: Removing leading `/' from member names
+Unpacking layer ea5f4e620e7906c8ecbc506b5e6f46420e68d4b842c3303260d5eb621b5942e5/layer.tar
+tar: Removing leading `/' from member names
+Unpacking layer 65807b9abe8ab753fa97da8fb74a21fcd4725cc51e1b679c7973c97acd47ebcf/layer.tar
+tar: Removing leading `/' from member names
+Unpacking layer b7da2076b60ebc0ea6824ef641978332b8ac908d47b2d07ff31b9cc362245605/layer.tar
+Executing post-mount steps...
+Packing raw image...
+[    1.660036] reboot: Power down
+/nix/store/x6a5m7c6zdpqz1d8j7cnzpx9glzzvd2h-hello
+```
+
+The following command lists some of the contents of the output to verify that the structure of the archive is as expected:
+
+```shell
+$ tar --exclude '*/share/*' --exclude 'nix/store/*/*' -tvf /nix/store/x6a5m7c6zdpqz1d8j7cnzpx9glzzvd2h-hello
+drwxr-xr-x root/0            0 1979-12-31 16:00 ./
+drwxr-xr-x root/0            0 1979-12-31 16:00 ./bin/
+lrwxrwxrwx root/0            0 1979-12-31 16:00 ./bin/hello -> /nix/store/h92a9jd0lhhniv2q417hpwszd4jhys7q-hello-2.12.1/bin/hello
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/05zbwhz8a7i2v79r9j21pl6m6cj0xi8k-libunistring-1.1/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/ayg5rhjhi9ic73hqw33mjqjxwv59ndym-xgcc-13.2.0-libgcc/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/h92a9jd0lhhniv2q417hpwszd4jhys7q-hello-2.12.1/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/m59xdgkgnjbk8kk6k6vbxmqnf82mk9s0-libidn2-2.3.4/
+dr-xr-xr-x root/0            0 1979-12-31 16:00 ./nix/store/p3jshbwxiwifm1py0yq544fmdyy98j8a-glibc-2.38-27/
+drwxr-xr-x root/0            0 1979-12-31 16:00 ./share/
+```
+:::
+
+:::{.example #ex-dockerTools-exportImage-importingDocker}
+# Importing an archive built with `dockerTools.exportImage` in Docker
+
+We will use the same package from [](#ex-dockerTools-exportImage-hello) and import it into Docker.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.exportImage {
+  name = "hello";
+  fromImage = dockerTools.buildLayeredImage {
+    name = "hello";
+    contents = [ hello ];
+  };
+}
+```
+
+Building and importing it into Docker:
+
+```shell
+$ nix-build
+(output removed for clarity)
+/nix/store/x6a5m7c6zdpqz1d8j7cnzpx9glzzvd2h-hello
+$ docker image import /nix/store/x6a5m7c6zdpqz1d8j7cnzpx9glzzvd2h-hello
+sha256:1d42dba415e9b298ea0decf6497fbce954de9b4fcb2984f91e307c8fedc1f52f
+$ docker image ls
+REPOSITORY                              TAG                IMAGE ID       CREATED         SIZE
+<none>                                  <none>             1d42dba415e9   4 seconds ago   32.6MB
+```
+:::
+
+:::{.example #ex-dockerTools-exportImage-naming}
+# Exploring output naming with `dockerTools.exportImage`
+
+`exportImage` does not require a `name` attribute if `fromImage` is a derivation, which means that the following works:
+
+```nix
+{ dockerTools, hello }:
+dockerTools.exportImage {
+  fromImage = dockerTools.buildLayeredImage {
+    name = "hello";
+    contents = [ hello ];
+  };
+}
+```
+
+However, since [`dockerTools.buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage)'s output ends with `.tar.gz`, the output of `exportImage` will also end with `.tar.gz`, even though the archive created with `exportImage` is uncompressed:
+
+```shell
+$ nix-build
+(output removed for clarity)
+/nix/store/by3f40xvc4l6bkis74l0fj4zsy0djgkn-hello.tar.gz
+$ file /nix/store/by3f40xvc4l6bkis74l0fj4zsy0djgkn-hello.tar.gz
+/nix/store/by3f40xvc4l6bkis74l0fj4zsy0djgkn-hello.tar.gz: POSIX tar archive (GNU)
+```
+
+If the archive were actually compressed, the output of `file` would have mentioned that fact.
+Because of this, it may be important to set a proper `name` attribute when using `exportImage` with other functions from `dockerTools`.
+:::
+
+:::{.example #ex-dockerTools-exportImage-fromImagePath}
+# Using `dockerTools.exportImage` with a path as `fromImage`
+
+It is possible to use a path as the value of the `fromImage` attribute when calling `dockerTools.exportImage`.
+However, when doing so, a `name` attribute **MUST** be specified, or you'll encounter an error when evaluating the Nix code.
+
+For this example, we'll assume a Docker tarball image named `image.tar.gz` exists in the same directory where our package is defined:
+
+```nix
+{ dockerTools }:
+dockerTools.exportImage {
+  name = "filesystem.tar";
+  fromImage = ./image.tar.gz;
+}
+```
+
+Building this will give us the expected output:
+
+```shell
+$ nix-build
+(output removed for clarity)
+/nix/store/w13l8h3nlkg0zv56k7rj0ai0l2zlf7ss-filesystem.tar
+```
+
+If you don't specify a `name` attribute, you'll encounter an evaluation error and the package won't build.
+:::
+
+## Environment Helpers {#ssec-pkgs-dockerTools-helpers}
+
+When building Docker images with Nix, you might also want to add certain files that are expected to be available globally by the software you're packaging.
+Simple examples are the `env` utility in `/usr/bin/env`, or trusted root TLS/SSL certificates.
+Such files will most likely not be included if you're building a Docker image from scratch with Nix, and they might also not be included if you're starting from a Docker image that doesn't include them.
+The helpers in this section are packages that provide some of these commonly-needed global files.
+
+Most of these helpers are packages, which means you have to add them to the list of contents to be included in the image (this changes depending on the function you're using to build the image).
+[](#ex-dockerTools-helpers-buildImage) and [](#ex-dockerTools-helpers-buildLayeredImage) show how to include these packages on `dockerTools` functions that build an image.
+For more details on how that works, see the documentation for the function you're using.
+
+### usrBinEnv {#sssec-pkgs-dockerTools-helpers-usrBinEnv}
+
+This provides the `env` utility at `/usr/bin/env`.
+This is currently implemented by linking to the `env` binary from the `coreutils` package, but is considered an implementation detail that could change in the future.
+
+### binSh {#sssec-pkgs-dockerTools-helpers-binSh}
+
+This provides a `/bin/sh` link to the `bash` binary from the `bashInteractive` package.
+Because of this, it supports cases such as running a command interactively in a container (for example by running `docker container run -it <image_name>`).
+
+### caCertificates {#sssec-pkgs-dockerTools-helpers-caCertificates}
+
+This adds trusted root TLS/SSL certificates from the `cacert` package in multiple locations in an attempt to be compatible with binaries built for multiple Linux distributions.
+The locations currently used are:
+
+- `/etc/ssl/certs/ca-bundle.crt`
+- `/etc/ssl/certs/ca-certificates.crt`
+- `/etc/pki/tls/certs/ca-bundle.crt`
+
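+For example, the following is a minimal sketch of an image that bundles these certificates with `curl` (the `curl-with-certs` name is only illustrative):
+
+```nix
+{ dockerTools, curl }:
+dockerTools.buildLayeredImage {
+  name = "curl-with-certs";
+  tag = "latest";
+
+  # caCertificates provides the CA bundle in the locations listed above,
+  # allowing curl to verify TLS certificates inside the container.
+  contents = [
+    curl
+    dockerTools.caCertificates
+  ];
+
+  config.Cmd = [ "/bin/curl" "--verbose" "https://nixos.org" ];
+}
+```
+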
+[]{#ssec-pkgs-dockerTools-fakeNss}
+### fakeNss {#sssec-pkgs-dockerTools-helpers-fakeNss}
+
+This is a re-export of the `fakeNss` package from Nixpkgs.
+See [](#sec-fakeNss).
+
+### shadowSetup {#ssec-pkgs-dockerTools-shadowSetup}
+
+This is a string containing a script that sets up files needed for [`shadow`](https://github.com/shadow-maint/shadow) to work (using the `shadow` package from Nixpkgs), and alters `PATH` to make all its utilities available in the same script.
+It is intended to be used with other dockerTools functions in attributes that expect scripts.
+After the script in `shadowSetup` runs, you'll then be able to add more commands that make use of the utilities in `shadow`, such as adding any extra users and/or groups.
+See [](#ex-dockerTools-shadowSetup-buildImage) and [](#ex-dockerTools-shadowSetup-buildLayeredImage) to better understand how to use it.
+
+`shadowSetup` achieves a result similar to [`fakeNss`](#sssec-pkgs-dockerTools-helpers-fakeNss), but only sets up a `root` user with different values for the home directory and the shell to use, in addition to setting up files for [PAM](https://en.wikipedia.org/wiki/Linux_PAM) and a {manpage}`login.defs(5)` file.
+
+:::{.caution}
+Using both `fakeNss` and `shadowSetup` at the same time will either cause your build to break or produce unexpected results.
+Use either `fakeNss` or `shadowSetup` depending on your use case, but avoid using both.
+:::
+
+:::{.note}
+When used with [`buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage) or [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage), you will have to set the `enableFakechroot` attribute to `true`, or else the script in `shadowSetup` won't run properly.
+See [](#ex-dockerTools-shadowSetup-buildLayeredImage).
+:::
+
+### Examples {#ssec-pkgs-dockerTools-helpers-examples}
+
+:::{.example #ex-dockerTools-helpers-buildImage}
+# Using `dockerTools`'s environment helpers with `buildImage`
+
+This example adds the [`binSh`](#sssec-pkgs-dockerTools-helpers-binSh) helper to a basic Docker image built with [`dockerTools.buildImage`](#ssec-pkgs-dockerTools-buildImage).
+This helper makes it possible to enter a shell inside the container.
+This is the `buildImage` equivalent of [](#ex-dockerTools-helpers-buildLayeredImage).
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildImage {
+  name = "env-helpers";
+  tag = "latest";
+
+  copyToRoot = [
+    hello
+    dockerTools.binSh
+  ];
+}
+```
+
+After building the image and loading it in Docker, we can create a container based on it and enter a shell inside the container.
+This is made possible by `binSh`.
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/2p0i3i04cgjlk71hsn7ll4kxaxxiv4qg-docker-image-env-helpers.tar.gz
+$ docker image load -i /nix/store/2p0i3i04cgjlk71hsn7ll4kxaxxiv4qg-docker-image-env-helpers.tar.gz
+(output removed for clarity)
+$ docker container run --rm -it env-helpers:latest /bin/sh
+sh-5.2# help
+GNU bash, version 5.2.21(1)-release (x86_64-pc-linux-gnu)
+(rest of output removed for clarity)
+```
+:::
+
+:::{.example #ex-dockerTools-helpers-buildLayeredImage}
+# Using `dockerTools`'s environment helpers with `buildLayeredImage`
+
+This example adds the [`binSh`](#sssec-pkgs-dockerTools-helpers-binSh) helper to a basic Docker image built with [`dockerTools.buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage).
+This helper makes it possible to enter a shell inside the container.
+This is the `buildLayeredImage` equivalent of [](#ex-dockerTools-helpers-buildImage).
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildLayeredImage {
+  name = "env-helpers";
+  tag = "latest";
+
+  contents = [
+    hello
+    dockerTools.binSh
+  ];
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+  };
+}
+```
+
+After building the image and loading it in Docker, we can create a container based on it and enter a shell inside the container.
+This is made possible by `binSh`.
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/rpf47f4z5b9qr4db4ach9yr4b85hjhxq-env-helpers.tar.gz
+$ docker image load -i /nix/store/rpf47f4z5b9qr4db4ach9yr4b85hjhxq-env-helpers.tar.gz
+(output removed for clarity)
+$ docker container run --rm -it env-helpers:latest /bin/sh
+sh-5.2# help
+GNU bash, version 5.2.21(1)-release (x86_64-pc-linux-gnu)
+(rest of output removed for clarity)
+```
+:::
+
+:::{.example #ex-dockerTools-shadowSetup-buildImage}
+# Using `dockerTools.shadowSetup` with `dockerTools.buildImage`
+
+This is an example that shows how to use `shadowSetup` with `dockerTools.buildImage`.
+Note that the extra script in `runAsRoot` uses `groupadd` and `useradd`, which are binaries provided by the `shadow` package.
+These binaries are added to the `PATH` by the `shadowSetup` script, but only for the duration of `runAsRoot`.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildImage {
+  name = "shadow-basic";
+  tag = "latest";
+
+  copyToRoot = [ hello ];
+
+  runAsRoot = ''
+    ${dockerTools.shadowSetup}
+    groupadd -r hello
+    useradd -r -g hello hello
+    mkdir /data
+    chown hello:hello /data
+  '';
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+    WorkingDir = "/data";
+  };
+}
+```
+:::
+
+:::{.example #ex-dockerTools-shadowSetup-buildLayeredImage}
+# Using `dockerTools.shadowSetup` with `dockerTools.buildLayeredImage`
+
+This example accomplishes the same thing as [](#ex-dockerTools-shadowSetup-buildImage), but uses `buildLayeredImage` instead.
+
+Note that the extra script in `fakeRootCommands` uses `groupadd` and `useradd`, which are binaries provided by the `shadow` package.
+These binaries are added to the `PATH` by the `shadowSetup` script, but only for the duration of `fakeRootCommands`.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildLayeredImage {
+  name = "shadow-basic";
+  tag = "latest";
+
+  contents = [ hello ];
+
+  fakeRootCommands = ''
+    ${dockerTools.shadowSetup}
+    groupadd -r hello
+    useradd -r -g hello hello
+    mkdir /data
+    chown hello:hello /data
+  '';
+  enableFakechroot = true;
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+    WorkingDir = "/data";
+  };
+}
+```
+:::
+
+[]{#ssec-pkgs-dockerTools-buildNixShellImage-arguments}
+## buildNixShellImage {#ssec-pkgs-dockerTools-buildNixShellImage}
+
+`buildNixShellImage` uses [`streamNixShellImage`](#ssec-pkgs-dockerTools-streamNixShellImage) underneath to build a compressed Docker-compatible repository tarball of an image that sets up an environment similar to that of running `nix-shell` on a derivation.
+Basically, `buildNixShellImage` runs the script created by `streamNixShellImage` to save the compressed image in the Nix store.
+
+`buildNixShellImage` supports the same options as `streamNixShellImage`; see [`streamNixShellImage`](#ssec-pkgs-dockerTools-streamNixShellImage) for details.
+
+[]{#ssec-pkgs-dockerTools-buildNixShellImage-example}
+### Examples {#ssec-pkgs-dockerTools-buildNixShellImage-examples}
+
+:::{.example #ex-dockerTools-buildNixShellImage-hello}
+# Building a Docker image with `buildNixShellImage` with the build environment for the `hello` package
+
+This example shows how to build the `hello` package inside a Docker container built with `buildNixShellImage`.
+The Docker image generated will have a name like `hello-<version>-env` and tag `latest`.
+This example is the `buildNixShellImage` equivalent of [](#ex-dockerTools-streamNixShellImage-hello).
+
+```nix
+{ dockerTools, hello }:
+dockerTools.buildNixShellImage {
+  drv = hello;
+  tag = "latest";
+}
+```
+
+The result of building this package is a `.tar.gz` file that can be loaded into Docker:
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/pkj1sgzaz31wl0pbvbg3yp5b3kxndqms-hello-2.12.1-env.tar.gz
+
+$ docker image load -i /nix/store/pkj1sgzaz31wl0pbvbg3yp5b3kxndqms-hello-2.12.1-env.tar.gz
+(some output removed for clarity)
+Loaded image: hello-2.12.1-env:latest
+```
+
+After starting an interactive container, the derivation can be built by running `buildDerivation`, and the output can be executed as expected:
+
+```shell
+$ docker container run -it hello-2.12.1-env:latest
+[nix-shell:~]$ buildDerivation
+Running phase: unpackPhase
+unpacking source archive /nix/store/pa10z4ngm0g83kx9mssrqzz30s84vq7k-hello-2.12.1.tar.gz
+source root is hello-2.12.1
+(some output removed for clarity)
+Running phase: fixupPhase
+shrinking RPATHs of ELF executables and libraries in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1
+shrinking /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/bin/hello
+checking for references to /build/ in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1...
+gzipping man pages under /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/share/man/
+patching script interpreter paths in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1
+stripping (with command strip and flags -S -p) in  /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/bin
+
+[nix-shell:~]$ $out/bin/hello
+Hello, world!
+```
+:::
+
+## streamNixShellImage {#ssec-pkgs-dockerTools-streamNixShellImage}
+
+`streamNixShellImage` builds a **script** which, when run, will stream to stdout a Docker-compatible repository tarball of an image that sets up an environment similar to that of running `nix-shell` on a derivation.
+This means that `streamNixShellImage` does not output an image into the Nix store, but only a script that builds the image, saving on IO and disk/cache space, particularly with large images.
+See [](#ex-dockerTools-streamNixShellImage-hello) to understand how to load in Docker the image generated by this script.
+
+The environment set up by `streamNixShellImage` somewhat resembles the Nix sandbox typically used by `nix-build`, with a major difference being that access to the internet is allowed.
+It also behaves like an interactive `nix-shell`, running things like `shellHook` (see [](#ex-dockerTools-streamNixShellImage-addingShellHook)) and setting an interactive prompt.
+If the derivation is buildable (i.e. `nix-build` can be used on it), running `buildDerivation` in the container will build the derivation, with all its outputs being available in the correct `/nix/store` paths, pointed to by the respective environment variables (e.g. `$out`).
+
+::: {.caution}
+The environment in the image doesn't match `nix-shell` or `nix-build` exactly, and this function is known not to work correctly for fixed-output derivations, content-addressed derivations, impure derivations and other special types of derivations.
+:::
+
+### Inputs {#ssec-pkgs-dockerTools-streamNixShellImage-inputs}
+
+`streamNixShellImage` expects one argument with the following attributes:
+
+`drv` (Attribute Set)
+
+: The derivation for which the environment in the image will be set up.
+  Adding packages to the Docker image is possible by extending the list of `nativeBuildInputs` of this derivation.
+  See [](#ex-dockerTools-streamNixShellImage-extendingBuildInputs) for how to do that.
+  Similarly, you can extend the image initialization script by extending `shellHook`.
+  [](#ex-dockerTools-streamNixShellImage-addingShellHook) shows how to do that.
+
+`name` (String; _optional_)
+
+: The name of the generated image.
+
+  _Default value:_ the value of `drv.name + "-env"`.
+
+`tag` (String or Null; _optional_)
+
+: Tag of the generated image.
+  If `null`, the hash of the nix derivation that builds the Docker image will be used as the tag.
+
+  _Default value:_ `null`.
+
+`uid` (Number; _optional_)
+
+: The user ID to run the container as.
+  It is analogous to a `nixbld` build user.
+
+  _Default value:_ 1000.
+
+`gid` (Number; _optional_)
+
+: The group ID to run the container as.
+  It is analogous to a `nixbld` build group.
+
+  _Default value:_ 1000.
+
+`homeDirectory` (String; _optional_)
+
+: The home directory of the user the container is running as.
+
+  _Default value:_ `/build`.
+
+`shell` (String; _optional_)
+
+: The path to the `bash` binary to use as the shell.
+  This shell is started when running the image.
+  This can be seen as an equivalent of the `NIX_BUILD_SHELL` [environment variable](https://nixos.org/manual/nix/stable/command-ref/nix-shell.html#environment-variables) for {manpage}`nix-shell(1)`.
+
+  _Default value:_ the `bash` binary from the `bashInteractive` package.
+
+`command` (String or Null; _optional_)
+
+: If specified, this command will be run in the environment of the derivation in an interactive shell.
+  A call to `exit` will be added after the command if it is specified, so the shell will exit after it's finished running.
+  This can be seen as an equivalent of the `--command` option in {manpage}`nix-shell(1)`.
+
+  _Default value:_ `null`.
+
+`run` (String or Null; _optional_)
+
+: Similar to the `command` attribute, but runs the command in a non-interactive shell instead.
+  A call to `exit` will be added after the command if it is specified, so the shell will exit after it's finished running.
+  This can be seen as an equivalent of the `--run` option in {manpage}`nix-shell(1)`.
+
+  _Default value:_ `null`.
+
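+As a hedged sketch (not one of the examples below), the `run` attribute could be used to build the derivation non-interactively and run its output as soon as the container starts; `hello` serves only as a placeholder derivation here, and `buildDerivation` is the helper shown in [](#ex-dockerTools-streamNixShellImage-hello):
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamNixShellImage {
+  drv = hello;
+  tag = "latest";
+  # Build the derivation and run the result; run is non-interactive and exits afterwards.
+  run = ''
+    buildDerivation
+    $out/bin/hello
+  '';
+}
+```
+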
+### Examples {#ssec-pkgs-dockerTools-streamNixShellImage-examples}
+
+:::{.example #ex-dockerTools-streamNixShellImage-hello}
+# Building a Docker image with `streamNixShellImage` with the build environment for the `hello` package
+
+This example shows how to build the `hello` package inside a Docker container built with `streamNixShellImage`.
+The Docker image generated will have a name like `hello-<version>-env` and tag `latest`.
+This example is the `streamNixShellImage` equivalent of [](#ex-dockerTools-buildNixShellImage-hello).
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamNixShellImage {
+  drv = hello;
+  tag = "latest";
+}
+```
+
+The result of building this package is a script.
+Running this script and piping it into `docker image load` gives you the same image that was built in [](#ex-dockerTools-buildNixShellImage-hello).
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/8vhznpz2frqazxnd8pgdvf38jscdypax-stream-hello-2.12.1-env
+
+$ /nix/store/8vhznpz2frqazxnd8pgdvf38jscdypax-stream-hello-2.12.1-env | docker image load
+(some output removed for clarity)
+Loaded image: hello-2.12.1-env:latest
+```
+
+After starting an interactive container, the derivation can be built by running `buildDerivation`, and the output can be executed as expected:
+
+```shell
+$ docker container run -it hello-2.12.1-env:latest
+[nix-shell:~]$ buildDerivation
+Running phase: unpackPhase
+unpacking source archive /nix/store/pa10z4ngm0g83kx9mssrqzz30s84vq7k-hello-2.12.1.tar.gz
+source root is hello-2.12.1
+(some output removed for clarity)
+Running phase: fixupPhase
+shrinking RPATHs of ELF executables and libraries in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1
+shrinking /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/bin/hello
+checking for references to /build/ in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1...
+gzipping man pages under /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/share/man/
+patching script interpreter paths in /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1
+stripping (with command strip and flags -S -p) in  /nix/store/f2vs29jibd7lwxyj35r9h87h6brgdysz-hello-2.12.1/bin
+
+[nix-shell:~]$ $out/bin/hello
+Hello, world!
+```
+:::
+
+:::{.example #ex-dockerTools-streamNixShellImage-extendingBuildInputs}
+# Adding extra packages to a Docker image built with `streamNixShellImage`
+
+This example shows how to add extra packages to an image built with `streamNixShellImage`.
+In this case, we'll add the `cowsay` package.
+The Docker image generated will have a name like `hello-<version>-env` and tag `latest`.
+This example uses [](#ex-dockerTools-streamNixShellImage-hello) as a starting point.
+
+```nix
+{ dockerTools, cowsay, hello }:
+dockerTools.streamNixShellImage {
+  tag = "latest";
+  drv = hello.overrideAttrs (old: {
+    nativeBuildInputs = old.nativeBuildInputs or [] ++ [
+      cowsay
+    ];
+  });
+}
+```
+
+The result of building this package is a script which can be run and piped into `docker image load` to load the generated image.
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/h5abh0vljgzg381lna922gqknx6yc0v7-stream-hello-2.12.1-env
+
+$ /nix/store/h5abh0vljgzg381lna922gqknx6yc0v7-stream-hello-2.12.1-env | docker image load
+(some output removed for clarity)
+Loaded image: hello-2.12.1-env:latest
+```
+
+After starting an interactive container, we can verify the extra package is available by running `cowsay`:
+
+```shell
+$ docker container run -it hello-2.12.1-env:latest
+[nix-shell:~]$ cowsay "Hello, world!"
+ _______________
+< Hello, world! >
+ ---------------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\
+                ||----w |
+                ||     ||
+```
+:::
+
+:::{.example #ex-dockerTools-streamNixShellImage-addingShellHook}
+# Adding a `shellHook` to a Docker image built with `streamNixShellImage`
+
+This example shows how to add a `shellHook` command to an image built with `streamNixShellImage`.
+In this case, we'll simply output the string `Hello, world!`.
+The Docker image generated will have a name like `hello-<version>-env` and tag `latest`.
+This example uses [](#ex-dockerTools-streamNixShellImage-hello) as a starting point.
+
+```nix
+{ dockerTools, hello }:
+dockerTools.streamNixShellImage {
+  tag = "latest";
+  drv = hello.overrideAttrs (old: {
+    shellHook = ''
+      ${old.shellHook or ""}
+      echo "Hello, world!"
+    '';
+  });
+}
+```
+
+The result of building this package is a script which can be run and piped into `docker image load` to load the generated image.
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/iz4dhdvgzazl5vrgyz719iwjzjy6xlx1-stream-hello-2.12.1-env
+
+$ /nix/store/iz4dhdvgzazl5vrgyz719iwjzjy6xlx1-stream-hello-2.12.1-env | docker image load
+(some output removed for clarity)
+Loaded image: hello-2.12.1-env:latest
+```
+
+After starting an interactive container, we can see the result of the `shellHook`:
+
+```shell
+$ docker container run -it hello-2.12.1-env:latest
+Hello, world!
+
+[nix-shell:~]$
+```
+:::
diff --git a/nixpkgs/doc/build-helpers/images/makediskimage.section.md b/nixpkgs/doc/build-helpers/images/makediskimage.section.md
new file mode 100644
index 000000000000..e50479c4e83e
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/makediskimage.section.md
@@ -0,0 +1,108 @@
+# `<nixpkgs/nixos/lib/make-disk-image.nix>` {#sec-make-disk-image}
+
+`<nixpkgs/nixos/lib/make-disk-image.nix>` is a function to create _disk images_ in multiple formats: raw, QCOW2 (QEMU), QCOW2-Compressed (compressed version), VDI (VirtualBox), VPC (VirtualPC).
+
+This function can create images in two ways:
+
+- using `cptofs` without any virtual machine to create a Nix store disk image,
+- using a virtual machine to create a full NixOS installation.
+
+When testing early-boot or lifecycle aspects of NixOS, such as a bootloader or multiple generations, a full NixOS system installation is necessary.
+For many web servers and applications, on the other hand, a Nix-store-only disk image is sufficient and faster to build.
+
+NixOS tests also use this function when preparing the VM. The `cptofs` method is used when `virtualisation.useBootLoader` is false (the default). Otherwise the second method is used.
+
+## Features {#sec-make-disk-image-features}
+
+For reference, see the function's source code for documentation on its arguments: <https://github.com/NixOS/nixpkgs/blob/master/nixos/lib/make-disk-image.nix>.
+Features are separated into sections below depending on whether you opt for a Nix-store-only image or a full NixOS image.
+
+### Common {#sec-make-disk-image-features-common}
+
+- arbitrary NixOS configuration
+- automatic or fixed disk size: the `diskSize` parameter; when `diskSize` is `auto`, `additionalSpace` can be set to add a constant amount of extra disk space
+- multiple partition table layouts: EFI, legacy, legacy + GPT, hybrid, or none, through the `partitionTableType` parameter
+- OVMF/EFI firmware and variable templates can be customized
+- the root filesystem `fsType` can be customized to any filesystem type for which an `mkfs.${fsType}` binary exists during image creation
+- the root filesystem label can be customized; it defaults to `nix-store` for a Nix-store-only image and `nixpkgs/nixos` otherwise
+- arbitrary code can be executed after the disk image is produced, using `postVM`
+- the current nixpkgs can be realized as a channel in the disk image, which will change the hash of the image when the sources are updated
+- additional store paths can be provided through `additionalPaths`
+
+### Full NixOS image {#sec-make-disk-image-features-full-image}
+
+- arbitrary contents with permissions can be placed in the target filesystem using `contents`
+- a `/etc/nixpkgs/nixos/configuration.nix` can be provided through `configFile`
+- bootloaders are supported
+- EFI variables can be mutated during image production and the result is exposed in `$out`
+- the boot partition size can be configured when the partition table type is `efi` or `hybrid`
+
+### On bit-to-bit reproducibility {#sec-make-disk-image-features-reproducibility}
+
+Images are **not** deterministic; contributions to fix this are welcome. Known sources of non-determinism include (non-exhaustive):
+
+- the bootloader installation embeds timestamps
+- the SQLite Nix store database contains registration times
+- `/etc/shadow` entries appear in a non-deterministic order
+
+A `deterministic` flag is available for best-effort determinism.
+
+## Usage {#sec-make-disk-image-usage}
+
+To produce a Nix-store only image:
+```nix
+let
+  pkgs = import <nixpkgs> {};
+  lib = pkgs.lib;
+  make-disk-image = import <nixpkgs/nixos/lib/make-disk-image.nix>;
+in
+  make-disk-image {
+    inherit pkgs lib;
+    config = {};
+    additionalPaths = [ ];
+    format = "qcow2";
+    onlyNixStore = true;
+    partitionTableType = "none";
+    installBootLoader = false;
+    touchEFIVars = false;
+    diskSize = "auto";
+    additionalSpace = "0M"; # Defaults to 512M.
+    copyChannel = false;
+  }
+```
+
+Some arguments can be left out; they are shown explicitly here for the sake of the example.
+
+Building this derivation will provide a QCOW2 disk image containing only the Nix store and its registration information.
+
+To produce a NixOS installation disk image with UEFI support and a bootloader installed:
+```nix
+let
+  pkgs = import <nixpkgs> {};
+  lib = pkgs.lib;
+  make-disk-image = import <nixpkgs/nixos/lib/make-disk-image.nix>;
+  evalConfig = import <nixpkgs/nixos/lib/eval-config.nix>;
+in
+  make-disk-image {
+    inherit pkgs lib;
+    config = evalConfig {
+      modules = [
+        {
+          fileSystems."/" = { device = "/dev/vda"; fsType = "ext4"; autoFormat = true; };
+          boot.grub.device = "/dev/vda";
+        }
+      ];
+    };
+    format = "qcow2";
+    onlyNixStore = false;
+    partitionTableType = "legacy+gpt";
+    installBootLoader = true;
+    touchEFIVars = true;
+    diskSize = "auto";
+    additionalSpace = "0M"; # Defaults to 512M.
+    copyChannel = false;
+    memSize = 2048; # QEMU VM memory size in megabytes. Defaults to 1024.
+  }
+```
+
+
diff --git a/nixpkgs/doc/build-helpers/images/ocitools.section.md b/nixpkgs/doc/build-helpers/images/ocitools.section.md
new file mode 100644
index 000000000000..96627615ffb5
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/ocitools.section.md
@@ -0,0 +1,104 @@
+# pkgs.ociTools {#sec-pkgs-ociTools}
+
+`pkgs.ociTools` is a set of functions for creating runtime container bundles according to the [OCI runtime specification v1.0.0](https://github.com/opencontainers/runtime-spec/blob/v1.0.0/spec.md).
+It makes no assumptions about the container runner you choose to use to run the created container.
+
+The set of functions in `pkgs.ociTools` currently does not handle the [OCI image specification](https://github.com/opencontainers/image-spec).
+
+At a high level, an OCI implementation downloads an OCI image, then unpacks that image into an OCI runtime filesystem bundle.
+This bundle is then run by an OCI runtime.
+`pkgs.ociTools` provides utilities to create OCI Runtime bundles.
+
+## buildContainer {#ssec-pkgs-ociTools-buildContainer}
+
+This function creates an OCI runtime container (consisting of a `config.json` and a root filesystem directory) that runs a single command inside of it.
+The Nix store of the container will contain all referenced dependencies of the given command.
+
+This function has an assumption that the container will run on POSIX platforms, and sets configurations (such as the user running the process or certain mounts) according to this assumption.
+Because of this, a container built with `buildContainer` will not work on Windows or other non-POSIX platforms without modifications to the container configuration.
+These modifications aren't supported by `buildContainer`.
+
+For `linux` platforms, `buildContainer` also configures the following namespaces (see {manpage}`unshare(1)`) to isolate the OCI container from the global namespace:
+PID, network, mount, IPC, and UTS.
+
+Note that no user namespace is created, which means that you won't be able to run the container unless you are the `root` user.
+
+### Inputs {#ssec-pkgs-ociTools-buildContainer-inputs}
+
+`buildContainer` expects an argument with the following attributes:
+
+`args` (List of String)
+
+: Specifies a set of arguments to run inside the container.
+  Any packages referenced by `args` will be made available inside the container.
+
+`mounts` (Attribute Set; _optional_)
+
+: Specifies additional mounts that the runtime must make available to the container.
+
+  :::{.warning}
+  As explained in [issue #290879](https://github.com/NixOS/nixpkgs/issues/290879), this attribute is currently ignored.
+  :::
+
+  :::{.note}
+  `buildContainer` includes a minimal set of necessary filesystems to be mounted into the container, and this set can't be changed with the `mounts` attribute.
+  :::
+
+  _Default value:_ `{}`.
+
+`readonly` (Boolean; _optional_)
+
+: If `true`, sets the container's root filesystem as read-only.
+
+  _Default value:_ `false`.
+
+`os` **DEPRECATED**
+
+: Specifies the operating system on which the container filesystem is based.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties).
+  According to the linked specification, all possible values for `$GOOS` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `darwin` or `linux`.
+
+  _Default value:_ `"linux"`.
+
+`arch` **DEPRECATED**
+
+: Used to specify the architecture for which the binaries in the container filesystem have been compiled.
+  If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties).
+  According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
+
+  _Default value:_ `"x86_64"`.
+
+### Examples {#ssec-pkgs-ociTools-buildContainer-examples}
+
+::: {.example #ex-ociTools-buildContainer-bash}
+# Creating an OCI runtime container that runs `bash`
+
+This example uses `ociTools.buildContainer` to create a simple container that runs `bash`.
+
+```nix
+{ ociTools, lib, bash }:
+ociTools.buildContainer {
+  args = [
+    (lib.getExe bash)
+  ];
+
+  readonly = false;
+}
+```
+
+As an example of how to run the container generated by this package, we'll use `runc` to start the container.
+Any other tool that supports OCI containers could be used instead.
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/7f9hgx0arvhzp2a3qphp28rxbn748l25-join
+
+$ cd /nix/store/7f9hgx0arvhzp2a3qphp28rxbn748l25-join
+$ nix-shell -p runc
+[nix-shell:/nix/store/7f9hgx0arvhzp2a3qphp28rxbn748l25-join]$ sudo runc run ocitools-example
+help
+GNU bash, version 5.2.26(1)-release (x86_64-pc-linux-gnu)
+(some output removed for clarity)
+```
+:::
diff --git a/nixpkgs/doc/build-helpers/images/portableservice.section.md b/nixpkgs/doc/build-helpers/images/portableservice.section.md
new file mode 100644
index 000000000000..c271bc775dba
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/images/portableservice.section.md
@@ -0,0 +1,174 @@
+# pkgs.portableService {#sec-pkgs-portableService}
+
+`pkgs.portableService` is a function to create [Portable Services](https://systemd.io/PORTABLE_SERVICES/) in a read-only, immutable, `squashfs` raw disk image.
+This lets you use Nix to build images which can be run on many recent Linux distributions.
+
+::: {.note}
+Portable services are supported starting with systemd 239 (released on 2018-06-22).
+:::
+
+The generated image will contain the file system structure as required by the Portable Services specification, along with the packages given to `portableService` and all of their dependencies.
+When generated, the image will exist in the Nix store with the `.raw` file extension, as required by the specification.
+See [](#ex-portableService-hello) to understand how to use the output of `portableService`.
+
+## Inputs {#ssec-pkgs-portableService-inputs}
+
+`portableService` expects one argument with the following attributes:
+
+`pname` (String)
+
+: The name of the portable service.
+  The generated image will be named according to the template `$pname_$version.raw`, which is supported by the Portable Services specification.
+
+`version` (String)
+
+: The version of the portable service.
+  The generated image will be named according to the template `$pname_$version.raw`, which is supported by the Portable Services specification.
+
+`units` (List of Attribute Set)
+
+: A list of derivations for systemd unit files.
+  Each derivation must produce a single file, and must have a name that starts with the value of `pname` and ends with the suffix of the unit type (e.g. ".service", ".socket", ".timer", and so on).
+  See [](#ex-portableService-hello) to better understand this naming constraint.
+
+`description` (String or Null; _optional_)
+
+: If specified, the value is added as `PORTABLE_PRETTY_NAME` to the `/etc/os-release` file in the generated image.
+  This could be used to provide more information to anyone inspecting the image.
+
+  _Default value:_ `null`.
+
+`homepage` (String or Null; _optional_)
+
+: If specified, the value is added as `HOME_URL` to the `/etc/os-release` file in the generated image.
+  This could be used to provide more information to anyone inspecting the image.
+
+  _Default value:_ `null`.
+
+`symlinks` (List of Attribute Set; _optional_)
+
+: A list of attribute sets in the format `{object, symlink}`.
+  For each item in the list, `portableService` will create a symlink in the path specified by `symlink` (relative to the root of the image) that points to `object`.
+
+  All packages that `object` depends on and their dependencies are automatically copied into the image.
+
+  This can be used to create symlinks for applications that assume some files to exist globally (`/etc/ssl` or `/bin/bash`, for example).
+  See [](#ex-portableService-symlinks) to understand how to do that.
+
+  _Default value:_ `[]`.
+
+`contents` (List of Attribute Set; _optional_)
+
+: A list of additional derivations to be included as-is in the image.
+  These derivations will be included directly in a `/nix/store` directory inside the image.
+
+  _Default value:_ `[]`.
+
+`squashfsTools` (Attribute Set; _optional_)
+
+: Allows you to override the package that provides {manpage}`mksquashfs(1)`, which is used internally by `portableService`.
+
+  _Default value:_ `pkgs.squashfsTools`.
+
+`squash-compression` (String; _optional_)
+
+: Passed as the compression option to {manpage}`mksquashfs(1)`, which is used internally by `portableService`.
+
+  _Default value:_ `"xz -Xdict-size 100%"`.
+
+`squash-block-size` (String; _optional_)
+
+: Passed as the block size option to {manpage}`mksquashfs(1)`, which is used internally by `portableService`.
+
+  _Default value:_ `"1M"`.
+
+## Examples {#ssec-pkgs-portableService-examples}
+
+[]{#ex-pkgs-portableService}
+:::{.example #ex-portableService-hello}
+# Building a Portable Service image
+
+The following example builds a Portable Service image with the `hello` package, along with a service unit that runs it.
+
+```nix
+{ lib, writeText, portableService, hello }:
+let
+  hello-service = writeText "hello.service" ''
+    [Unit]
+    Description=Hello world service
+
+    [Service]
+    Type=oneshot
+    ExecStart=${lib.getExe hello}
+  '';
+in
+portableService {
+  pname = "hello";
+  inherit (hello) version;
+  units = [ hello-service ];
+}
+```
+
+After building the package, the generated image can be loaded into a system through {manpage}`portablectl(1)`:
+
+```shell
+$ nix-build
+(some output removed for clarity)
+/nix/store/8c20z1vh7z8w8dwagl8w87b45dn5k6iq-hello-img-2.12.1
+
+$ portablectl attach /nix/store/8c20z1vh7z8w8dwagl8w87b45dn5k6iq-hello-img-2.12.1/hello_2.12.1.raw
+Created directory /etc/systemd/system.attached.
+Created directory /etc/systemd/system.attached/hello.service.d.
+Written /etc/systemd/system.attached/hello.service.d/20-portable.conf.
+Created symlink /etc/systemd/system.attached/hello.service.d/10-profile.conf → /usr/lib/systemd/portable/profile/default/service.conf.
+Copied /etc/systemd/system.attached/hello.service.
+Created symlink /etc/portables/hello_2.12.1.raw → /nix/store/8c20z1vh7z8w8dwagl8w87b45dn5k6iq-hello-img-2.12.1/hello_2.12.1.raw.
+
+$ systemctl start hello
+$ journalctl -u hello
+Feb 28 22:39:16 hostname systemd[1]: Starting Hello world service...
+Feb 28 22:39:16 hostname hello[102887]: Hello, world!
+Feb 28 22:39:16 hostname systemd[1]: hello.service: Deactivated successfully.
+Feb 28 22:39:16 hostname systemd[1]: Finished Hello world service.
+
+$ portablectl detach hello_2.12.1
+Removed /etc/systemd/system.attached/hello.service.
+Removed /etc/systemd/system.attached/hello.service.d/10-profile.conf.
+Removed /etc/systemd/system.attached/hello.service.d/20-portable.conf.
+Removed /etc/systemd/system.attached/hello.service.d.
+Removed /etc/portables/hello_2.12.1.raw.
+Removed /etc/systemd/system.attached.
+```
+:::
+
+:::{.example #ex-portableService-symlinks}
+# Specifying symlinks when building a Portable Service image
+
+Some services may expect files or directories to be available globally.
+An example is a service which expects all trusted SSL certificates to exist in a specific location by default.
+
+To make things available globally, you must specify the `symlinks` attribute when using `portableService`.
+The following package builds on the package from [](#ex-portableService-hello) to make `/etc/ssl` available globally (this is only for illustrative purposes, because `hello` doesn't use `/etc/ssl`).
+
+```nix
+{ lib, writeText, portableService, hello, cacert }:
+let
+  hello-service = writeText "hello.service" ''
+    [Unit]
+    Description=Hello world service
+
+    [Service]
+    Type=oneshot
+    ExecStart=${lib.getExe hello}
+  '';
+in
+portableService {
+  pname = "hello";
+  inherit (hello) version;
+  units = [ hello-service ];
+  symlinks = [
+    { object = "${cacert}/etc/ssl"; symlink = "/etc/ssl"; }
+  ];
+}
+```
+:::
diff --git a/nixpkgs/doc/build-helpers/special.md b/nixpkgs/doc/build-helpers/special.md
new file mode 100644
index 000000000000..9da278f094dd
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special.md
@@ -0,0 +1,12 @@
+# Special build helpers {#chap-special}
+
+This chapter describes several special build helpers.
+
+```{=include=} sections
+special/fakenss.section.md
+special/fhs-environments.section.md
+special/makesetuphook.section.md
+special/mkshell.section.md
+special/vm-tools.section.md
+special/checkpoint-build.section.md
+```
diff --git a/nixpkgs/doc/build-helpers/special/checkpoint-build.section.md b/nixpkgs/doc/build-helpers/special/checkpoint-build.section.md
new file mode 100644
index 000000000000..a1ce5608f246
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/checkpoint-build.section.md
@@ -0,0 +1,43 @@
+# pkgs.checkpointBuildTools  {#sec-checkpoint-build}
+
+`pkgs.checkpointBuildTools` provides a way to build derivations incrementally. It consists of two functions to make checkpoint builds using Nix possible.
+
+For hermeticity, Nix derivations do not allow any state to be carried over between builds, making a transparent incremental build within a derivation impossible.
+
+However, we can tell Nix explicitly what the previous build state was, by representing that previous state as a derivation output. This allows the passed build state to be used for an incremental build.
+
+To change a normal derivation to a checkpoint based build, these steps must be taken:
+  - apply `prepareCheckpointBuild` to the desired derivation, e.g.
+```nix
+{
+  checkpointArtifacts = (pkgs.checkpointBuildTools.prepareCheckpointBuild pkgs.virtualbox);
+}
+```
+  - change something you want in the sources of the package, e.g. use a source override:
+```nix
+{
+  changedVBox = pkgs.virtualbox.overrideAttrs (old: {
+    src = path/to/vbox/sources;
+  });
+}
+```
+  - use `mkCheckpointBuild changedVBox checkpointArtifacts`
+  - enjoy shorter build times
+
+## Example {#sec-checkpoint-build-example}
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+let
+  inherit (pkgs.checkpointBuildTools)
+    prepareCheckpointBuild
+    mkCheckpointBuild
+    ;
+  helloCheckpoint = prepareCheckpointBuild pkgs.hello;
+  changedHello = pkgs.hello.overrideAttrs (_: {
+    doCheck = false;
+    patchPhase = ''
+      sed -i 's/Hello, world!/Hello, Nix!/g' src/hello.c
+    '';
+  });
+in mkCheckpointBuild changedHello helloCheckpoint
+```
diff --git a/nixpkgs/doc/build-helpers/special/fakenss.section.md b/nixpkgs/doc/build-helpers/special/fakenss.section.md
new file mode 100644
index 000000000000..c890752c0653
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/fakenss.section.md
@@ -0,0 +1,77 @@
+# fakeNss {#sec-fakeNss}
+
+Provides `/etc/passwd` and `/etc/group` files that contain `root` and `nobody`, allowing user/group lookups to work in binaries that insist on doing those.
+This might be a better choice than a custom script running `useradd` and related utilities if you only need those files to exist with some entries.
+
+`fakeNss` also provides `/etc/nsswitch.conf`, configuring NSS host resolution to first check `/etc/hosts` before checking DNS, since the default in the absence of a config file (`dns [!UNAVAIL=return] files`) is quite unexpected.
+
+It also creates an empty directory at `/var/empty` because it uses that as the home directory for the `root` and `nobody` users.
+The `/var/empty` directory can also be used as a `chroot` target to prevent file access in processes that do not need to access files, if your container runs such processes.
+
+The user entries created by `fakeNss` use the `/bin/sh` shell, which is not provided by `fakeNss` because in most cases it won't be used.
+If you need that to be available, see [`dockerTools.binSh`](#sssec-pkgs-dockerTools-helpers-binSh) or provide your own.
+
+## Inputs {#sec-fakeNss-inputs}
+
+`fakeNss` is made available in Nixpkgs as a package rather than a function, but it has two attributes that can be overridden and might be useful in particular cases.
+For more details on how overriding works, see [](#ex-fakeNss-overriding) and [](#sec-pkg-override).
+
+`extraPasswdLines` (List of Strings; _optional_)
+
+: A list of lines that will be added to `/etc/passwd`.
+  Useful if extra users need to exist in the output of `fakeNss`.
+  If `extraPasswdLines` is specified, it will **not** override the `root` and `nobody` entries created by `fakeNss`.
+  Those entries will always exist.
+
+  Lines specified here must follow the format in {manpage}`passwd(5)`.
+
+  _Default value:_ `[]`.
+
+`extraGroupLines` (List of Strings; _optional_)
+
+: A list of lines that will be added to `/etc/group`.
+  Useful if extra groups need to exist in the output of `fakeNss`.
+  If `extraGroupLines` is specified, it will **not** override the `root` and `nobody` entries created by `fakeNss`.
+  Those entries will always exist.
+
+  Lines specified here must follow the format in {manpage}`group(5)`.
+
+  _Default value:_ `[]`.
+
+## Examples {#sec-fakeNss-examples}
+
+:::{.example #ex-fakeNss-dockerTools-buildImage}
+# Using `fakeNss` with `dockerTools.buildImage`
+
+This example shows how to use `fakeNss` as-is.
+It is useful with functions in `dockerTools` to allow building Docker images that have the `/etc/passwd` and `/etc/group` files.
+This example includes the `hello` binary in the image so it can do something besides just have the extra files.
+
+```nix
+{ dockerTools, fakeNss, hello }:
+dockerTools.buildImage {
+  name = "image-with-passwd";
+  tag = "latest";
+
+  copyToRoot = [ fakeNss hello ];
+
+  config = {
+    Cmd = [ "/bin/hello" ];
+  };
+}
+```
+:::
+
+:::{.example #ex-fakeNss-overriding}
+# Using `fakeNss` with an override to add extra lines
+
+The following code uses `override` to add extra lines to `/etc/passwd` and `/etc/group` to create another user and group entry.
+
+```nix
+{ fakeNss }:
+fakeNss.override {
+  extraPasswdLines = ["newuser:x:9001:9001:new user:/var/empty:/bin/sh"];
+  extraGroupLines = ["newuser:x:9001:"];
+}
+```
+:::
diff --git a/nixpkgs/doc/build-helpers/special/fhs-environments.section.md b/nixpkgs/doc/build-helpers/special/fhs-environments.section.md
new file mode 100644
index 000000000000..8145fbd730f7
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/fhs-environments.section.md
@@ -0,0 +1,56 @@
+# buildFHSEnv {#sec-fhs-environments}
+
+`buildFHSEnv` provides a way to build and run FHS-compatible lightweight sandboxes. It creates an isolated root filesystem with the host's `/nix/store`, so its footprint in terms of disk space is quite small. This allows you to run software which is hard or infeasible to patch for NixOS: for instance, third-party source trees with FHS assumptions, games distributed as tarballs, or software with integrity checking and/or external self-updating binaries.
+It uses the Linux namespaces feature to create temporary lightweight environments which are destroyed after all child processes exit, without requiring elevated privileges. It works similarly to containerisation technology such as Docker or Flatpak, but provides no security-relevant separation from the host system.
+
+Accepted arguments are:
+
+- `name`
+  The name of the environment and the wrapper executable.
+- `targetPkgs`
+  Packages to be installed for the main host's architecture (i.e. x86_64 on x86_64 installations). Along with libraries, binaries are also installed.
+- `multiPkgs`
+  Packages to be installed for all architectures supported by a host (i.e. i686 and x86_64 on x86_64 installations). Only libraries are installed by default.
+- `multiArch`
+  Whether to install 32-bit `multiPkgs` into the FHSEnv in 64-bit environments.
+- `extraBuildCommands`
+  Additional commands to be executed for finalizing the directory structure.
+- `extraBuildCommandsMulti`
+  Like `extraBuildCommands`, but executed only on multilib architectures.
+- `extraOutputsToInstall`
+  Additional derivation outputs to be linked for both target and multi-architecture packages.
+- `extraInstallCommands`
+  Additional commands to be executed for finalizing the derivation with the runner script.
+- `runScript`
+  A shell command to be executed inside the sandbox. It defaults to `bash`. Command line arguments passed to the resulting wrapper are appended to this command by default.
+  This command must be escaped, e.g. `"foo app" --do-stuff --with "some file"`. See `lib.escapeShellArgs`.
+- `profile`
+  Optional script for `/etc/profile` within the sandbox.
+
+You can create a simple environment using a `shell.nix` like this:
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+
+(pkgs.buildFHSEnv {
+  name = "simple-x11-env";
+  targetPkgs = pkgs: (with pkgs; [
+    udev
+    alsa-lib
+  ]) ++ (with pkgs.xorg; [
+    libX11
+    libXcursor
+    libXrandr
+  ]);
+  multiPkgs = pkgs: (with pkgs; [
+    udev
+    alsa-lib
+  ]);
+  runScript = "bash";
+}).env
+```
+
+Running `nix-shell` on it would drop you into a shell inside an FHS env where those libraries and binaries are available in FHS-compliant paths. Applications that expect an FHS structure (i.e. proprietary binaries) can run inside this environment without modification.
+You can build a wrapper by running your binary in `runScript`, e.g. `./bin/start.sh`. Relative paths work as expected.
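+
+As a minimal, hedged sketch of such a wrapper (the name `mygame`, the dependency list, and the path `./bin/start.sh` are placeholders, not from the example above):
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.buildFHSEnv {
+  # The wrapper executable will be available as bin/mygame in the result.
+  name = "mygame";
+  targetPkgs = pkgs: [ pkgs.SDL2 ];
+  # Run the program's own launcher script; relative paths are resolved at run time.
+  runScript = "./bin/start.sh";
+}
+```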
+
+Additionally, the FHS builder links all relocated gsettings-schemas (the glib setup-hook moves them to `share/gsettings-schemas/${name}/glib-2.0/schemas`) to their standard FHS location. This means you don't need to wrap binaries with `wrapGAppsHook`.
diff --git a/nixpkgs/doc/build-helpers/special/makesetuphook.section.md b/nixpkgs/doc/build-helpers/special/makesetuphook.section.md
new file mode 100644
index 000000000000..e83164b7eb70
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/makesetuphook.section.md
@@ -0,0 +1,37 @@
+# pkgs.makeSetupHook {#sec-pkgs.makeSetupHook}
+
+`pkgs.makeSetupHook` is a build helper that produces hooks that go into `nativeBuildInputs`.
+
+## Usage {#sec-pkgs.makeSetupHook-usage}
+
+```nix
+pkgs.makeSetupHook {
+  name = "something-hook";
+  propagatedBuildInputs = [ pkgs.commandsomething ];
+  depsTargetTargetPropagated = [ pkgs.libsomething ];
+} ./script.sh
+```
+
+### A setup hook that depends on the `hello` package, runs `hello`, and substitutes `@shell@` with the path to `bash` {#sec-pkgs.makeSetupHook-usage-example}
+
+```nix
+pkgs.makeSetupHook {
+  name = "run-hello-hook";
+  propagatedBuildInputs = [ pkgs.hello ];
+  substitutions = { shell = "${pkgs.bash}/bin/bash"; };
+  passthru.tests.greeting = callPackage ./test { };
+  meta.platforms = lib.platforms.linux;
+} (writeScript "run-hello-hook.sh" ''
+  #!@shell@
+  hello
+'')
+```
+
+## Attributes {#sec-pkgs.makeSetupHook-attributes}
+
+* `name` Set the name of the hook.
+* `propagatedBuildInputs` Runtime dependencies (such as binaries) of the hook.
+* `depsTargetTargetPropagated` Non-binary dependencies.
+* `meta`
+* `passthru`
+* `substitutions` Variables for `substituteAll`
diff --git a/nixpkgs/doc/build-helpers/special/mkshell.section.md b/nixpkgs/doc/build-helpers/special/mkshell.section.md
new file mode 100644
index 000000000000..e39bef7468e3
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/mkshell.section.md
@@ -0,0 +1,41 @@
+# pkgs.mkShell {#sec-pkgs-mkShell}
+
+`pkgs.mkShell` is a specialized `stdenv.mkDerivation` that removes some
+repetition when using it with `nix-shell` (or `nix develop`).
+
+## Usage {#sec-pkgs-mkShell-usage}
+
+Here is a common usage example:
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+pkgs.mkShell {
+  packages = [ pkgs.gnumake ];
+
+  inputsFrom = [ pkgs.hello pkgs.gnutar ];
+
+  shellHook = ''
+    export DEBUG=1
+  '';
+}
+```
+
+## Attributes {#sec-pkgs-mkShell-attributes}
+
+* `name` (default: `nix-shell`). Set the name of the derivation.
+* `packages` (default: `[]`). Add executable packages to the `nix-shell` environment.
+* `inputsFrom` (default: `[]`). Add build dependencies of the listed derivations to the `nix-shell` environment.
+* `shellHook` (default: `""`). Bash statements that are executed by `nix-shell`.
+
+... all the attributes of `stdenv.mkDerivation`.
+
+## Variants {#sec-pkgs-mkShell-variants}
+
+`pkgs.mkShellNoCC` is a variant that uses `stdenvNoCC` instead of `stdenv` as base environment. This is useful if no C compiler is needed in the shell environment.
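+
+For instance, a sketch of a shell without a C compiler (the `jq` package here is an arbitrary choice):
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+pkgs.mkShellNoCC {
+  packages = [ pkgs.jq ];
+}
+```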
+
+## Building the shell {#sec-pkgs-mkShell-building}
+
+This derivation's output will contain a text file that contains a reference to
+all the build inputs. This is useful in CI, where we want to make sure that
+every derivation and its dependencies build properly, or when creating a GC
+root so that the build dependencies don't get garbage-collected.
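+
+Assuming the shell is defined in a `shell.nix` like the one above, a sketch of building it and keeping a GC root (the symlink name `dev-shell-root` is arbitrary) would be:
+
+```shell
+$ nix-build shell.nix -o dev-shell-root
+```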
diff --git a/nixpkgs/doc/build-helpers/special/vm-tools.section.md b/nixpkgs/doc/build-helpers/special/vm-tools.section.md
new file mode 100644
index 000000000000..8feab04902d8
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/special/vm-tools.section.md
@@ -0,0 +1,148 @@
+# vmTools {#sec-vm-tools}
+
+A set of VM-related utilities that help in building some packages in more advanced scenarios.
+
+## `vmTools.createEmptyImage` {#vm-tools-createEmptyImage}
+
+A bash script fragment that produces a disk image at `destination`.
+
+### Attributes {#vm-tools-createEmptyImage-attributes}
+
+* `size`. The disk size, in MiB.
+* `fullName`. Name that will be written to `${destination}/nix-support/full-name`.
+* `destination` (optional, default `$out`). Where to write the image files.
+
+## `vmTools.runInLinuxVM` {#vm-tools-runInLinuxVM}
+
+Run a derivation in a Linux virtual machine (using QEMU/KVM).
+By default, there is no disk image; the root filesystem is a `tmpfs`, and the Nix store is shared with the host (via the [9P protocol](https://wiki.qemu.org/Documentation/9p#9p_Protocol)).
+Thus, any pure Nix derivation should run unmodified.
+
+If the build fails and Nix is run with the `-K/--keep-failed` option, a script `run-vm` will be left behind in the temporary build directory that allows you to boot into the VM and debug it interactively.
+
+### Attributes {#vm-tools-runInLinuxVM-attributes}
+
+* `preVM` (optional). Shell command to be evaluated *before* the VM is started (i.e., on the host).
+* `memSize` (optional, default `512`). The memory size of the VM in MiB.
+* `diskImage` (optional). A file system image to be attached to `/dev/sda`.
+  Note that currently we expect the image to contain a filesystem, not a full disk image with a partition table etc.
+
+### Examples {#vm-tools-runInLinuxVM-examples}
+
+Build the derivation hello inside a VM:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+runInLinuxVM hello
+```
+
+Build inside a VM with extra memory:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+runInLinuxVM (hello.overrideAttrs (_: { memSize = 1024; }))
+```
+
+Use VM with a disk image (implicitly sets `diskImage`, see [`vmTools.createEmptyImage`](#vm-tools-createEmptyImage)):
+```nix
+{ pkgs }: with pkgs; with vmTools;
+runInLinuxVM (hello.overrideAttrs (_: {
+  preVM = createEmptyImage {
+    size = 1024;
+    fullName = "vm-image";
+  };
+}))
+```
+
+## `vmTools.extractFs` {#vm-tools-extractFs}
+
+Takes a file, such as an ISO, and extracts its contents into the store.
+
+### Attributes {#vm-tools-extractFs-attributes}
+
+* `file`. Path to the file to be extracted.
+  Note that currently we expect the image to contain a filesystem, not a full disk image with a partition table etc.
+* `fs` (optional). Filesystem of the contents of the file.
+
+### Examples {#vm-tools-extractFs-examples}
+
+Extract the contents of an ISO file:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+extractFs { file = ./image.iso; }
+```
+
+## `vmTools.extractMTDfs` {#vm-tools-extractMTDfs}
+
+Like [](#vm-tools-extractFs), but it makes use of a [Memory Technology Device (MTD)](https://en.wikipedia.org/wiki/Memory_Technology_Device).
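+
+A hedged sketch, assuming the same `file` and `fs` arguments as `extractFs` (the file name and filesystem type are placeholders):
+
+```nix
+{ pkgs }: with pkgs; with vmTools;
+extractMTDfs { file = ./firmware.img; fs = "jffs2"; }
+```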
+
+## `vmTools.runInLinuxImage` {#vm-tools-runInLinuxImage}
+
+Like [](#vm-tools-runInLinuxVM), but instead of using `stdenv` from the Nix store, run the build using the tools provided by `/bin`, `/usr/bin`, etc. from the specified filesystem image, which typically is a filesystem containing a [FHS](https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard)-based Linux distribution.
+
+## `vmTools.makeImageTestScript` {#vm-tools-makeImageTestScript}
+
+Generate a script that can be used to run an interactive session in the given image.
+
+### Examples {#vm-tools-makeImageTestScript-examples}
+
+Create a script for running a Fedora 27 VM:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+makeImageTestScript diskImages.fedora27x86_64
+```
+
+Create a script for running an Ubuntu 20.04 VM:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+makeImageTestScript diskImages.ubuntu2004x86_64
+```
+
+## `vmTools.diskImageFuns` {#vm-tools-diskImageFuns}
+
+A set of functions that build a predefined set of minimal Linux distribution images.
+
+### Images {#vm-tools-diskImageFuns-images}
+
+* Fedora
+  * `fedora26x86_64`
+  * `fedora27x86_64`
+* CentOS
+  * `centos6i386`
+  * `centos6x86_64`
+  * `centos7x86_64`
+* Ubuntu
+  * `ubuntu1404i386`
+  * `ubuntu1404x86_64`
+  * `ubuntu1604i386`
+  * `ubuntu1604x86_64`
+  * `ubuntu1804i386`
+  * `ubuntu1804x86_64`
+  * `ubuntu2004i386`
+  * `ubuntu2004x86_64`
+  * `ubuntu2204i386`
+  * `ubuntu2204x86_64`
+* Debian
+  * `debian10i386`
+  * `debian10x86_64`
+  * `debian11i386`
+  * `debian11x86_64`
+
+### Attributes {#vm-tools-diskImageFuns-attributes}
+
+* `size` (optional, defaults to `4096`). The size of the image, in MiB.
+* `extraPackages` (optional). A list of names of additional packages from the distribution that should be included in the image.
+
+### Examples {#vm-tools-diskImageFuns-examples}
+
+8GiB image containing Firefox in addition to the default packages:
+```nix
+{ pkgs }: with pkgs; with vmTools;
+diskImageFuns.ubuntu2004x86_64 { extraPackages = [ "firefox" ]; size = 8192; }
+```
+
+## `vmTools.diskImageExtraFuns` {#vm-tools-diskImageExtraFuns}
+
+Shorthand for `vmTools.diskImageFuns.<attr> { extraPackages = ... }`.
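+
+As a hedged sketch, assuming the extra-package list is passed directly as the argument:
+
+```nix
+{ pkgs }: with pkgs; with vmTools;
+diskImageExtraFuns.ubuntu2004x86_64 [ "firefox" ]
+```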
+
+## `vmTools.diskImages` {#vm-tools-diskImages}
+
+Shorthand for `vmTools.diskImageFuns.<attr> { }`.
diff --git a/nixpkgs/doc/build-helpers/testers.chapter.md b/nixpkgs/doc/build-helpers/testers.chapter.md
new file mode 100644
index 000000000000..b734cbbbd4e2
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/testers.chapter.md
@@ -0,0 +1,280 @@
+# Testers {#chap-testers}
+
+This chapter describes several testing builders which are available in the `testers` namespace.
+
+## `hasPkgConfigModules` {#tester-hasPkgConfigModules}
+
+<!-- Old anchor name so links still work -->
+[]{#tester-hasPkgConfigModule}
+Checks whether a package exposes a given list of `pkg-config` modules.
+If the `moduleNames` argument is omitted, `hasPkgConfigModules` will use `meta.pkgConfigModules`.
+
+:::{.example #ex-haspkgconfigmodules-defaultvalues}
+
+# Check that `pkg-config` modules are exposed using default values
+
+```nix
+{
+  passthru.tests.pkg-config = testers.hasPkgConfigModules {
+    package = finalAttrs.finalPackage;
+  };
+
+  meta.pkgConfigModules = [ "libfoo" ];
+}
+```
+
+:::
+
+:::{.example #ex-haspkgconfigmodules-explicitmodules}
+
+# Check that `pkg-config` modules are exposed using explicit module names
+
+```nix
+{
+  passthru.tests.pkg-config = testers.hasPkgConfigModules {
+    package = finalAttrs.finalPackage;
+    moduleNames = [ "libfoo" ];
+  };
+}
+```
+
+:::
+
+## `testVersion` {#tester-testVersion}
+
+Checks that the output from running a command contains the specified version string as a whole word.
+
+Although simplistic, this test assures that the main program can run.
+While there's no substitute for a real test case, it does catch dynamic linking errors and such.
+It also provides some protection against accidentally building the wrong version, for example when using an "old" hash in a fixed-output derivation.
+
+By default, the command to be run will be inferred from the given `package` attribute:
+it will check `meta.mainProgram` first, and fall back to `pname` or `name`.
+The default argument to the command is `--version`, and the version to be checked will be inferred from the given `package` attribute as well.
+
+:::{.example #ex-testversion-hello}
+
+# Check a program version using all the default values
+
+This example will run the command `hello --version`, and then check that the version of the `hello` package is in the output of the command.
+
+```nix
+{
+  passthru.tests.version = testers.testVersion { package = hello; };
+}
+```
+
+:::
+
+:::{.example #ex-testversion-different-commandversion}
+
+# Check the program version using a specified command and expected version string
+
+This example will run the command `leetcode -V`, and then check that `leetcode 0.4.2` is in the output of the command as a whole word (separated by whitespaces).
+This means that an output like "leetcode 0.4.21" would fail the tests, and an output like "You're running leetcode 0.4.2" would pass the tests.
+
+A common usage of the `version` attribute is to specify `version = "v${version}"`.
+
+```nix
+{
+  version = "0.4.2";
+
+  passthru.tests.version = testers.testVersion {
+    package = leetcode-cli;
+    command = "leetcode -V";
+    version = "leetcode ${version}";
+  };
+}
+```
+
+:::
+
+## `testBuildFailure` {#tester-testBuildFailure}
+
+Make sure that a build does not succeed. This is useful for testing testers.
+
+This returns a derivation with an override on the builder, with the following effects:
+
+ - Fail the build when the original builder succeeds
+ - Move `$out` to `$out/result`, if it exists (assuming `out` is the default output)
+ - Save the build log to `$out/testBuildFailure.log` (same)
+
+While `testBuildFailure` is designed to keep changes to the original builder's environment to a minimum, some small changes are inevitable:
+
+ - The file `$TMPDIR/testBuildFailure.log` is present. It should not be deleted.
+ - `stdout` and `stderr` are a pipe instead of a tty. This could be improved.
+ - One or two extra processes are present in the sandbox during the original builder's execution.
+ - The derivation and output hashes are different, but not unusual.
+ - The derivation includes a dependency on `buildPackages.bash` and `expect-failure.sh`, which is built to include a transitive dependency on `buildPackages.coreutils` and possibly more.
+   These are not added to `PATH` or any other environment variable, so they should be hard to observe.
+
+:::{.example #ex-testBuildFailure-showingenvironmentchanges}
+
+# Check that a build fails, and verify the changes made during build
+
+```nix
+runCommand "example" {
+  failed = testers.testBuildFailure (runCommand "fail" {} ''
+    echo ok-ish >$out
+    echo failing though
+    exit 3
+  '');
+} ''
+  grep -F 'ok-ish' $failed/result
+  grep -F 'failing though' $failed/testBuildFailure.log
+  [[ 3 = $(cat $failed/testBuildFailure.exit) ]]
+  touch $out
+''
+```
+
+:::
+
+## `testEqualContents` {#tester-equalContents}
+
+Check that two paths have the same contents.
+
+:::{.example #ex-testEqualContents-toyexample}
+
+# Check that two paths have the same contents
+
+```nix
+testers.testEqualContents {
+  assertion = "sed -e performs replacement";
+  expected = writeText "expected" ''
+    foo baz baz
+  '';
+  actual = runCommand "actual" {
+    # not really necessary for a package that's in stdenv
+    nativeBuildInputs = [ gnused ];
+    base = writeText "base" ''
+      foo bar baz
+    '';
+  } ''
+    sed -e 's/bar/baz/g' $base >$out
+  '';
+}
+```
+
+:::
+
+## `testEqualDerivation` {#tester-testEqualDerivation}
+
+Checks that two packages produce the exact same build instructions.
+
+This can be used to make sure that a certain difference of configuration, such as the presence of an overlay, does not cause a cache miss.
+
+When the derivations are equal, the return value is an empty file.
+Otherwise, the build log explains the difference via `nix-diff`.
+
+:::{.example #ex-testEqualDerivation-hello}
+
+# Check that two packages produce the same derivation
+
+```nix
+testers.testEqualDerivation
+  "The hello package must stay the same when enabling checks."
+  hello
+  (hello.overrideAttrs(o: { doCheck = true; }))
+```
+
+:::
+
+## `invalidateFetcherByDrvHash` {#tester-invalidateFetcherByDrvHash}
+
+Use the derivation hash to invalidate the output via name, for testing.
+
+Type: `(a@{ name, ... } -> Derivation) -> a -> Derivation`
+
+Normally, fixed-output derivations can and should be cached by their output hash only, but for testing we want to re-fetch every time the fetcher changes.
+
+Changes to the fetcher become apparent in the drvPath, which is a hash of how to fetch, rather than a fixed store path.
+By inserting this hash into the name, we can make sure to re-run the fetcher every time the fetcher changes.
+
+This relies on the assumption that Nix isn't clever enough to reuse its database of local store contents to optimize fetching.
+
+You might notice that the "salted" name derives from the normal invocation, not the final derivation.
+`invalidateFetcherByDrvHash` has to invoke the fetcher function twice:
+once to get a derivation hash, and again to produce the final fixed output derivation.
+
+:::{.example #ex-invalidateFetcherByDrvHash-nix}
+
+# Prevent nix from reusing the output of a fetcher
+
+```nix
+{
+  tests.fetchgit = testers.invalidateFetcherByDrvHash fetchgit {
+    name = "nix-source";
+    url = "https://github.com/NixOS/nix";
+    rev = "9d9dbe6ed05854e03811c361a3380e09183f4f4a";
+    hash = "sha256-7DszvbCNTjpzGRmpIVAWXk20P0/XTrWZ79KSOGLrUWY=";
+  };
+}
+```
+
+:::
+
+## `runNixOSTest` {#tester-runNixOSTest}
+
+A helper function that behaves exactly like the NixOS `runTest`, except it also assigns this Nixpkgs package set as the `pkgs` of the test and makes the `nixpkgs.*` options read-only.
+
+If your test is part of the Nixpkgs repository, or if you need a more general entrypoint, see ["Calling a test" in the NixOS manual](https://nixos.org/manual/nixos/stable/index.html#sec-calling-nixos-tests).
+
+:::{.example #ex-runNixOSTest-hello}
+
+# Run a NixOS test using `runNixOSTest`
+
+```nix
+pkgs.testers.runNixOSTest ({ lib, ... }: {
+  name = "hello";
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.hello ];
+  };
+  testScript = ''
+    machine.succeed("hello")
+  '';
+})
+```
+
+:::
+
+## `nixosTest` {#tester-nixosTest}
+
+Run a NixOS VM network test using this evaluation of Nixpkgs.
+
+NOTE: This function is primarily for external use. NixOS itself uses `make-test-python.nix` directly. Packages defined in Nixpkgs [reuse NixOS tests via `nixosTests`, plural](#ssec-nixos-tests-linking).
+
+It is mostly equivalent to the function `import ./make-test-python.nix` from the [NixOS manual](https://nixos.org/nixos/manual/index.html#sec-nixos-tests), except that the current application of Nixpkgs (`pkgs`) will be used, instead of letting NixOS invoke Nixpkgs anew.
+
+If a test machine needs to set NixOS options under `nixpkgs`, it must set only the `nixpkgs.pkgs` option.
+
+### Parameter {#tester-nixosTest-parameter}
+
+A [NixOS VM test network](https://nixos.org/nixos/manual/index.html#sec-nixos-tests), or path to it. Example:
+
+```nix
+{
+  name = "my-test";
+  nodes = {
+    machine1 = { lib, pkgs, nodes, ... }: {
+      environment.systemPackages = [ pkgs.hello ];
+      services.foo.enable = true;
+    };
+    # machine2 = ...;
+  };
+  testScript = ''
+    start_all()
+    machine1.wait_for_unit("foo.service")
+    machine1.succeed("hello | foo-send")
+  '';
+}
+```
+
+### Result {#tester-nixosTest-result}
+
+A derivation that runs the VM test.
+
+Notable attributes:
+
+ * `nodes`: the evaluated NixOS configurations. Useful for debugging and exploring the configuration.
+
+ * `driverInteractive`: a script that launches an interactive Python session in the context of the `testScript`.
diff --git a/nixpkgs/doc/build-helpers/trivial-build-helpers.chapter.md b/nixpkgs/doc/build-helpers/trivial-build-helpers.chapter.md
new file mode 100644
index 000000000000..4f2754903f9b
--- /dev/null
+++ b/nixpkgs/doc/build-helpers/trivial-build-helpers.chapter.md
@@ -0,0 +1,710 @@
+# Trivial build helpers {#chap-trivial-builders}
+
+Nixpkgs provides a variety of wrapper functions that help build commonly useful derivations.
+Like [`stdenv.mkDerivation`](#sec-using-stdenv), each of these build helpers creates a derivation, but the arguments passed are different (usually simpler) from those required by `stdenv.mkDerivation`.
+
+## `runCommand` {#trivial-builder-runCommand}
+
+`runCommand :: String -> AttrSet -> String -> Derivation`
+
+The result of `runCommand name drvAttrs buildCommand` is a derivation that is built by running the specified shell commands.
+
+By default `runCommand` runs in a stdenv with no compiler environment, whereas [`runCommandCC`](#trivial-builder-runCommandCC) uses the default stdenv, `pkgs.stdenv`.
+
+`name :: String`
+:   The name that Nix will append to the store path in the same way that `stdenv.mkDerivation` uses its `name` attribute.
+
+`drvAttrs :: AttrSet`
+:   Attributes to pass to the underlying call to [`stdenv.mkDerivation`](#chap-stdenv).
+
+`buildCommand :: String`
+:   Shell commands to run in the derivation builder.
+
+    ::: {.note}
+    You have to create a file or directory `$out` for Nix to be able to run the builder successfully.
+    :::
+
+::: {.example #ex-runcommand-simple}
+# Invocation of `runCommand`
+
+```nix
+(import <nixpkgs> {}).runCommand "my-example" {} ''
+  echo My example command is running
+
+  mkdir $out
+
+  echo I can write data to the Nix store > $out/message
+
+  echo I can also run basic commands like:
+
+  echo ls
+  ls
+
+  echo whoami
+  whoami
+
+  echo date
+  date
+''
+```
+:::
+
+## `runCommandCC` {#trivial-builder-runCommandCC}
+
+This works just like `runCommand`. The only difference is that it also provides a C compiler in `buildCommand`'s environment. To minimize your dependencies, you should only use this if you are sure you will need a C compiler as part of running your command.
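+
+For example, a minimal sketch that compiles a one-line C program with the compiler provided by this hook's environment (the names `hello-world-c` and `hello` are made up for illustration; `$CC` is set by the default stdenv):
+
+```nix
+runCommandCC "hello-world-c" {} ''
+  mkdir -p $out/bin
+  # write a trivial C source file and compile it into the output
+  echo 'int main(void) { return 0; }' > hello.c
+  $CC hello.c -o $out/bin/hello
+''
+```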
+
+## `runCommandLocal` {#trivial-builder-runCommandLocal}
+
+Variant of `runCommand` that forces the derivation to be built locally; it is not substituted. This is intended for very cheap commands (<1s execution time). It saves on the network round-trip and can speed up a build.
+
+::: {.note}
+This sets [`allowSubstitutes` to `false`](https://nixos.org/nix/manual/#adv-attr-allowSubstitutes), so only use `runCommandLocal` if you are certain the user will always have a builder for the `system` of the derivation. This should be true for most trivial use cases (e.g., just copying some files to a different location or adding symlinks) because there the `system` is usually the same as `builtins.currentSystem`.
+:::
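+
+As an illustration, copying a single small file into place is cheap enough that building locally is preferable (a sketch; the file `my-config.toml` is assumed to exist next to the Nix expression):
+
+```nix
+runCommandLocal "my-example-config" {} ''
+  mkdir -p $out
+  # simply copy the file into the output; substituting this would cost more than rebuilding it
+  cp ${./my-config.toml} $out/my-config.toml
+''
+```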
+
+## Writing text files {#trivial-builder-text-writing}
+
+Nixpkgs provides the following functions for producing derivations which write text files or executable scripts into the Nix store.
+They are useful for creating files from Nix expressions, and are all implemented as convenience wrappers around `writeTextFile`.
+
+Each of these functions will cause a derivation to be produced.
+When you coerce the result of each of these functions to a string with [string interpolation](https://nixos.org/manual/nix/stable/language/string-interpolation) or [`builtins.toString`](https://nixos.org/manual/nix/stable/language/builtins#builtins-toString), it will evaluate to the [store path](https://nixos.org/manual/nix/stable/store/store-path) of this derivation.
+
+:::: {.note}
+Some of these functions will put the resulting files within a directory inside the [derivation output](https://nixos.org/manual/nix/stable/language/derivations#attr-outputs).
+If you need to refer to the resulting files somewhere else in a Nix expression, append their path to the derivation's store path.
+
+For example, if the file destination is a directory:
+
+```nix
+{
+  my-file = writeTextFile {
+    name = "my-file";
+    text = ''
+      Contents of File
+    '';
+    destination = "/share/my-file";
+  };
+}
+```
+
+Remember to append "/share/my-file" to the resulting store path when using it elsewhere:
+
+```nix
+writeShellScript "evaluate-my-file.sh" ''
+  cat ${my-file}/share/my-file
+''
+```
+::::
+
+### `makeDesktopItem` {#trivial-builder-makeDesktopItem}
+
+Write an [XDG desktop file](https://specifications.freedesktop.org/desktop-entry-spec/1.4/) to the Nix store.
+
+This function is usually used to add desktop items to a package through the `copyDesktopItems` hook.
+
+`makeDesktopItem` adheres to version 1.4 of the specification.
+
+#### Inputs {#trivial-builder-makeDesktopItem-inputs}
+
+`makeDesktopItem` takes an attribute set that accepts most values from the [XDG specification](https://specifications.freedesktop.org/desktop-entry-spec/1.4/ar01s06.html).
+
+All recognised keys from the specification are supported with the exception of the "Hidden" field. The keys are converted into camelCase format, but correspond 1:1 to their equivalent in the specification: `genericName`, `noDisplay`, `comment`, `icon`, `onlyShowIn`, `notShowIn`, `dbusActivatable`, `tryExec`, `exec`, `path`, `terminal`, `mimeTypes`, `categories`, `implements`, `keywords`, `startupNotify`, `startupWMClass`, `url`, `prefersNonDefaultGPU`.
+
+The "Version" field is hardcoded to the version `makeDesktopItem` currently adheres to.
+
+The following fields are either required, are of a different type than in the specification, carry specific default values, or are additional fields supported by `makeDesktopItem`:
+
+`name` (String)
+
+: The name of the desktop file in the Nix store.
+
+`type` (String; _optional_)
+
+: Default value: `"Application"`
+
+`desktopName` (String)
+
+: Corresponds to the "Name" field of the specification.
+
+`actions` (Attribute set of attribute sets; _optional_)
+
+: Desktop actions, where each attribute name is the action identifier and each value is an attribute set of the form `{ name, exec?, icon? }`, as shown in the example below.
+
+`extraConfig` (Attribute set; _optional_)
+
+: Additional key/value pairs to be added verbatim to the desktop file. Attributes need to be prefixed with 'X-'.
+
+#### Examples {#trivial-builder-makeDesktopItem-examples}
+
+::: {.example #ex-makeDesktopItem}
+# Usage 1 of `makeDesktopItem`
+
+Write a desktop file `/nix/store/<store path>/my-program.desktop` to the Nix store.
+
+```nix
+{makeDesktopItem}:
+makeDesktopItem {
+  name = "my-program";
+  desktopName = "My Program";
+  genericName = "Video Player";
+  noDisplay = false;
+  comment = "Cool video player";
+  icon = "/path/to/icon";
+  onlyShowIn = [ "KDE" ];
+  dbusActivatable = true;
+  tryExec = "my-program";
+  exec = "my-program --someflag";
+  path = "/some/working/path";
+  terminal = false;
+  actions.example = {
+    name = "New Window";
+    exec = "my-program --new-window";
+    icon = "/some/icon";
+  };
+  mimeTypes = [ "video/mp4" ];
+  categories = [ "Utility" ];
+  implements = [ "org.my-program" ];
+  keywords = [ "Video" "Player" ];
+  startupNotify = false;
+  startupWMClass = "MyProgram";
+  prefersNonDefaultGPU = false;
+  extraConfig.X-SomeExtension = "somevalue";
+}
+```
+
+:::
+
+::: {.example #ex2-makeDesktopItem}
+# Usage 2 of `makeDesktopItem`
+
+Override the `hello` package to add a desktop item.
+
+```nix
+{ copyDesktopItems
+, hello
+, makeDesktopItem }:
+
+hello.overrideAttrs {
+  nativeBuildInputs = [ copyDesktopItems ];
+
+  desktopItems = [(makeDesktopItem {
+    name = "hello";
+    desktopName = "Hello";
+    exec = "hello";
+  })];
+}
+```
+
+:::
+
+### `writeTextFile` {#trivial-builder-writeTextFile}
+
+Write a text file to the Nix store.
+
+`writeTextFile` takes an attribute set with the following possible attributes:
+
+`name` (String)
+
+: Corresponds to the name used in the Nix store path identifier.
+
+`text` (String)
+
+: The contents of the file.
+
+`executable` (Bool, _optional_)
+
+: Make this file have the executable bit set.
+
+  Default: `false`
+
+`destination` (String, _optional_)
+
+: A subpath under the derivation's output path into which to put the file.
+  Subdirectories are created automatically when the derivation is realised.
+
+  By default, the store path itself will be a file containing the text contents.
+
+  Default: `""`
+
+`checkPhase` (String, _optional_)
+
+: Commands to run after generating the file.
+
+  Default: `""`
+
+`meta` (Attribute set, _optional_)
+
+: Additional metadata for the derivation.
+
+  Default: `{}`
+
+`allowSubstitutes` (Bool, _optional_)
+
+: Whether to allow substituting from a binary cache.
+  Passed through to [`allowSubstitutes`](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-allowSubstitutes) of the underlying call to `builtins.derivation`.
+
+  It defaults to `false`, as running the derivation's simple `builder` executable locally is assumed to be faster than network operations.
+  Set it to true if the `checkPhase` step is expensive.
+
+  Default: `false`
+
+`preferLocalBuild` (Bool, _optional_)
+
+: Whether to prefer building locally, even if faster [remote build machines](https://nixos.org/manual/nix/stable/command-ref/conf-file#conf-substituters) are available.
+
+  Passed through to [`preferLocalBuild`](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-preferLocalBuild) of the underlying call to `builtins.derivation`.
+
+  It defaults to `true` for the same reason `allowSubstitutes` defaults to `false`.
+
+  Default: `true`
+
+`derivationArgs` (Attribute set, _optional_)
+
+: Extra arguments to pass to the underlying call to `stdenv.mkDerivation`.
+
+  Default: `{}`
+
+The resulting store path will include some variation of the name, and it will be a file unless `destination` is used, in which case it will be a directory.
+
+::: {.example #ex-writeTextFile}
+# Usage 1 of `writeTextFile`
+
+Write `my-file` to `/nix/store/<store path>/some/subpath/my-cool-script`, making it executable.
+Also run a check on the resulting file in a `checkPhase`, and supply values for the less-used options.
+
+```nix
+writeTextFile {
+  name = "my-cool-script";
+  text = ''
+    #!/bin/sh
+    echo "This is my cool script!"
+  '';
+  executable = true;
+  destination = "/some/subpath/my-cool-script";
+  checkPhase = ''
+    ${pkgs.shellcheck}/bin/shellcheck $out/some/subpath/my-cool-script
+  '';
+  meta = {
+    license = pkgs.lib.licenses.cc0;
+  };
+  allowSubstitutes = true;
+  preferLocalBuild = false;
+}
+```
+:::
+
+::: {.example #ex2-writeTextFile}
+# Usage 2 of `writeTextFile`
+
+Write the string `Contents of File` to `/nix/store/<store path>`.
+See also the [](#trivial-builder-writeText) helper function.
+
+```nix
+writeTextFile {
+  name = "my-file";
+  text = ''
+    Contents of File
+  '';
+}
+```
+:::
+
+::: {.example #ex3-writeTextFile}
+# Usage 3 of `writeTextFile`
+
+Write an executable script `my-script` to `/nix/store/<store path>/bin/my-script`.
+See also the [](#trivial-builder-writeScriptBin) helper function.
+
+```nix
+writeTextFile {
+  name = "my-script";
+  text = ''
+    echo "hi"
+  '';
+  executable = true;
+  destination = "/bin/my-script";
+}
+```
+:::
+
+### `writeText` {#trivial-builder-writeText}
+
+Write a text file to the Nix store.
+
+`writeText` takes the following arguments:
+
+`name` (String)
+
+: The name used in the Nix store path.
+
+`text` (String)
+
+: The contents of the file.
+
+The store path will include the name, and it will be a file.
+
+::: {.example #ex-writeText}
+# Usage of `writeText`
+
+Write the string `Contents of File` to `/nix/store/<store path>`:
+
+```nix
+writeText "my-file"
+  ''
+  Contents of File
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-file";
+  text = ''
+    Contents of File
+  '';
+}
+```
+
+### `writeTextDir` {#trivial-builder-writeTextDir}
+
+Write a text file within a subdirectory of the Nix store.
+
+`writeTextDir` takes the following arguments:
+
+`path` (String)
+
+: The destination within the Nix store path under which to create the file.
+
+`text` (String)
+
+: The contents of the file.
+
+The store path will be a directory.
+
+::: {.example #ex-writeTextDir}
+# Usage of `writeTextDir`
+
+Write the string `Contents of File` to `/nix/store/<store path>/share/my-file`:
+
+```nix
+writeTextDir "share/my-file"
+  ''
+  Contents of File
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-file";
+  text = ''
+    Contents of File
+  '';
+  destination = "share/my-file";
+}
+```
+
+### `writeScript` {#trivial-builder-writeScript}
+
+Write an executable script file to the Nix store.
+
+`writeScript` takes the following arguments:
+
+`name` (String)
+
+: The name used in the Nix store path.
+
+`text` (String)
+
+: The contents of the file.
+
+The created file is marked as executable.
+The store path will include the name, and it will be a file.
+
+::: {.example #ex-writeScript}
+# Usage of `writeScript`
+
+Write the string `Contents of File` to `/nix/store/<store path>` and make the file executable.
+
+```nix
+writeScript "my-file"
+  ''
+  Contents of File
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-file";
+  text = ''
+    Contents of File
+  '';
+  executable = true;
+}
+```
+
+### `writeScriptBin` {#trivial-builder-writeScriptBin}
+
+Write a script within a `bin` subdirectory of a directory in the Nix store.
+This is for consistency with the convention of software packages placing executables under `bin`.
+
+`writeScriptBin` takes the following arguments:
+
+`name` (String)
+
+: The name used in the Nix store path and within the file created under the store path.
+
+`text` (String)
+
+: The contents of the file.
+
+The created file is marked as executable.
+The file's contents will be put into `/nix/store/<store path>/bin/<name>`.
+The store path will include the name, and it will be a directory.
+
+::: {.example #ex-writeScriptBin}
+# Usage of `writeScriptBin`
+
+```nix
+writeScriptBin "my-script"
+  ''
+  echo "hi"
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-script";
+  text = ''
+    echo "hi"
+  '';
+  executable = true;
+  destination = "bin/my-script";
+}
+```
+
+### `writeShellScript` {#trivial-builder-writeShellScript}
+
+Write a Bash script to the store.
+
+`writeShellScript` takes the following arguments:
+
+`name` (String)
+
+: The name used in the Nix store path.
+
+`text` (String)
+
+: The contents of the file.
+
+The created file is marked as executable.
+The store path will include the name, and it will be a file.
+
+This function is almost exactly like [](#trivial-builder-writeScript), except that it prepends to the file a [shebang](https://en.wikipedia.org/wiki/Shebang_%28Unix%29) line that points to the version of Bash used in Nixpkgs.
+<!-- this cannot be changed in practice, so there is no point pretending it's somehow generic -->
+
+::: {.example #ex-writeShellScript}
+# Usage of `writeShellScript`
+
+```nix
+writeShellScript "my-script"
+  ''
+  echo "hi"
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-script";
+  text = ''
+    #! ${pkgs.runtimeShell}
+    echo "hi"
+  '';
+  executable = true;
+}
+```
+
+### `writeShellScriptBin` {#trivial-builder-writeShellScriptBin}
+
+Write a Bash script to a "bin" subdirectory of a directory in the Nix store.
+
+`writeShellScriptBin` takes the following arguments:
+
+`name` (String)
+
+: The name used in the Nix store path and within the file generated under the store path.
+
+`text` (String)
+
+: The contents of the file.
+
+The file's contents will be put into `/nix/store/<store path>/bin/<name>`.
+The store path will include the name, and it will be a directory.
+
+This function is a combination of [](#trivial-builder-writeShellScript) and [](#trivial-builder-writeScriptBin).
+
+::: {.example #ex-writeShellScriptBin}
+# Usage of `writeShellScriptBin`
+
+```nix
+writeShellScriptBin "my-script"
+  ''
+  echo "hi"
+  ''
+```
+:::
+
+This is equivalent to:
+
+```nix
+writeTextFile {
+  name = "my-script";
+  text = ''
+    #! ${pkgs.runtimeShell}
+    echo "hi"
+  '';
+  executable = true;
+  destination = "bin/my-script";
+}
+```
+
+## `concatTextFile`, `concatText`, `concatScript` {#trivial-builder-concatText}
+
+These functions concatenate `files` into a single file in the Nix store. This is useful for configuration files structured in lines of text. `concatTextFile` takes an attribute set with two required arguments, `name` and `files`: `name` corresponds to the name used in the Nix store path, and `files` is the list of files to be concatenated. You can also set `executable` to `true` to make the resulting file have the executable bit set.
+`concatText` and `concatScript` are simple wrappers over `concatTextFile`.
+
+Here are a few examples:
+```nix
+
+# Writes my-file to /nix/store/<store path>
+concatTextFile {
+  name = "my-file";
+  files = [ drv1 "${drv2}/path/to/file" ];
+}
+# See also the `concatText` helper function below.
+
+# Writes executable my-file to /nix/store/<store path>/bin/my-file
+concatTextFile {
+  name = "my-file";
+  files = [ drv1 "${drv2}/path/to/file" ];
+  executable = true;
+  destination = "/bin/my-file";
+}
+# Writes contents of files to /nix/store/<store path>
+concatText "my-file" [ file1 file2 ]
+
+# Writes contents of files to /nix/store/<store path>
+concatScript "my-file" [ file1 file2 ]
+```
+
+## `writeShellApplication` {#trivial-builder-writeShellApplication}
+
+`writeShellApplication` is similar to `writeShellScriptBin` and `writeScriptBin` but supports runtime dependencies with `runtimeInputs`.
+Writes an executable shell script to `/nix/store/<store path>/bin/<name>` and checks its syntax with [`shellcheck`](https://github.com/koalaman/shellcheck) and Bash's `-n` option.
+Some basic Bash options are set by default (`errexit`, `nounset`, and `pipefail`), but can be overridden with `bashOptions`.
+
+Extra arguments may be passed to `stdenv.mkDerivation` by setting `derivationArgs`; note that variables set in this manner will be set when the shell script is _built,_ not when it's run.
+Runtime environment variables can be set with the `runtimeEnv` argument.
+
+For example, the following shell application can refer to `curl` directly, rather than needing to write `${curl}/bin/curl`:
+
+```nix
+writeShellApplication {
+  name = "show-nixos-org";
+
+  runtimeInputs = [ curl w3m ];
+
+  text = ''
+    curl -s 'https://nixos.org' | w3m -dump -T text/html
+  '';
+}
+```
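+
+As a further sketch, the `runtimeEnv` and `bashOptions` arguments mentioned above can be combined like this (the names `show-greeting` and `GREETING` are made up for illustration):
+
+```nix
+writeShellApplication {
+  name = "show-greeting";
+
+  # Exported when the script runs, not only when it is built.
+  runtimeEnv = { GREETING = "hello, world"; };
+
+  # Overrides the default errexit/nounset/pipefail set.
+  bashOptions = [ "errexit" "pipefail" ];
+
+  text = ''
+    echo "$GREETING"
+  '';
+}
+```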
+
+## `symlinkJoin` {#trivial-builder-symlinkJoin}
+
+This can be used to put many derivations into the same directory structure. It works by creating a new derivation and adding symlinks to each of the paths listed. It expects two arguments, `name` and `paths`. `name` is the name used in the Nix store path for the created derivation. `paths` is a list of paths that will be symlinked. These paths can be store paths of derivations or any subdirectory contained within them.
+Here is an example:
+```nix
+# adds symlinks of hello and stack to current build and prints "links added"
+symlinkJoin { name = "myexample"; paths = [ pkgs.hello pkgs.stack ]; postBuild = "echo links added"; }
+```
+This creates a derivation with a directory structure like the following:
+```
+/nix/store/sglsr5g079a5235hy29da3mq3hv8sjmm-myexample
+|-- bin
+|   |-- hello -> /nix/store/qy93dp4a3rqyn2mz63fbxjg228hffwyw-hello-2.10/bin/hello
+|   `-- stack -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/bin/stack
+`-- share
+    |-- bash-completion
+    |   `-- completions
+    |       `-- stack -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/share/bash-completion/completions/stack
+    |-- fish
+    |   `-- vendor_completions.d
+    |       `-- stack.fish -> /nix/store/6lzdpxshx78281vy056lbk553ijsdr44-stack-2.1.3.1/share/fish/vendor_completions.d/stack.fish
+...
+```
+
+## `writeReferencesToFile` {#trivial-builder-writeReferencesToFile}
+
+Deprecated. Use [`writeClosure`](#trivial-builder-writeClosure) instead.
+
+## `writeClosure` {#trivial-builder-writeClosure}
+
+Given a list of [store paths](https://nixos.org/manual/nix/stable/glossary#gloss-store-path) (or string-like expressions coercible to store paths), write their collective [closure](https://nixos.org/manual/nix/stable/glossary#gloss-closure) to a text file.
+
+The result is equivalent to the output of `nix-store -q --requisites`.
+
+For example,
+
+```nix
+writeClosure [ (writeScriptBin "hi" ''${hello}/bin/hello'') ]
+```
+
+produces an output path `/nix/store/<hash>-runtime-deps` containing
+
+```
+/nix/store/<hash>-hello-2.10
+/nix/store/<hash>-hi
+/nix/store/<hash>-libidn2-2.3.0
+/nix/store/<hash>-libunistring-0.9.10
+/nix/store/<hash>-glibc-2.32-40
+```
+
+You can see that this includes `hi`, the original input path; `hello`, which is a direct reference; and also the other paths that are indirectly required to run `hello`.
+
+## `writeDirectReferencesToFile` {#trivial-builder-writeDirectReferencesToFile}
+
+Writes the set of references of the given store path to the output file, that is, its immediate dependencies.
+
+This produces the equivalent of `nix-store -q --references`.
+
+For example,
+
+```nix
+writeDirectReferencesToFile (writeScriptBin "hi" ''${hello}/bin/hello'')
+```
+
+produces an output path `/nix/store/<hash>-runtime-references` containing
+
+```
+/nix/store/<hash>-hello-2.10
+```
+
+but none of `hello`'s dependencies because those are not referenced directly
+by `hi`'s output.
diff --git a/nixpkgs/doc/common.nix b/nixpkgs/doc/common.nix
new file mode 100644
index 000000000000..56f723eb6bd7
--- /dev/null
+++ b/nixpkgs/doc/common.nix
@@ -0,0 +1,4 @@
+{
+  outputPath = "share/doc/nixpkgs";
+  indexPath = "manual.html";
+}
diff --git a/nixpkgs/doc/contributing.md b/nixpkgs/doc/contributing.md
new file mode 100644
index 000000000000..3215dbe32bec
--- /dev/null
+++ b/nixpkgs/doc/contributing.md
@@ -0,0 +1,10 @@
+# Contributing to Nixpkgs {#part-contributing}
+
+```{=include=} chapters
+contributing/quick-start.chapter.md
+contributing/coding-conventions.chapter.md
+contributing/submitting-changes.chapter.md
+contributing/vulnerability-roundup.chapter.md
+contributing/reviewing-contributions.chapter.md
+contributing/contributing-to-documentation.chapter.md
+```
diff --git a/nixpkgs/doc/contributing/coding-conventions.chapter.md b/nixpkgs/doc/contributing/coding-conventions.chapter.md
new file mode 100644
index 000000000000..3afa6140c6cd
--- /dev/null
+++ b/nixpkgs/doc/contributing/coding-conventions.chapter.md
@@ -0,0 +1,63 @@
+# Coding conventions {#chap-conventions}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Syntax {#sec-syntax}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Package naming {#sec-package-naming}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## File naming and organisation {#sec-organisation}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Versioning {#sec-versioning}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Fetching Sources {#sec-sources}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Obtaining source hash {#sec-source-hashes}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Obtaining hashes securely {#sec-source-hashes-security}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Patches {#sec-patches}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Package tests {#sec-package-tests}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Writing inline package tests {#ssec-inline-package-tests-writing}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Writing larger package tests {#ssec-package-tests-writing}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Running package tests {#ssec-package-tests-running}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Examples of package tests {#ssec-package-tests-examples}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Linking NixOS module tests to a package {#ssec-nixos-tests-linking}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Import From Derivation {#ssec-import-from-derivation}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
diff --git a/nixpkgs/doc/contributing/contributing-to-documentation.chapter.md b/nixpkgs/doc/contributing/contributing-to-documentation.chapter.md
new file mode 100644
index 000000000000..777858b901c3
--- /dev/null
+++ b/nixpkgs/doc/contributing/contributing-to-documentation.chapter.md
@@ -0,0 +1,11 @@
+# Contributing to Nixpkgs documentation {#chap-contributing}
+
+This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
+
+## devmode {#sec-contributing-devmode}
+
+This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
+
+## Syntax {#sec-contributing-markup}
+
+This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
diff --git a/nixpkgs/doc/contributing/quick-start.chapter.md b/nixpkgs/doc/contributing/quick-start.chapter.md
new file mode 100644
index 000000000000..e482de7bc7b0
--- /dev/null
+++ b/nixpkgs/doc/contributing/quick-start.chapter.md
@@ -0,0 +1,3 @@
+# Quick Start to Adding a Package {#chap-quick-start}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
diff --git a/nixpkgs/doc/contributing/reviewing-contributions.chapter.md b/nixpkgs/doc/contributing/reviewing-contributions.chapter.md
new file mode 100644
index 000000000000..c291ef5b1293
--- /dev/null
+++ b/nixpkgs/doc/contributing/reviewing-contributions.chapter.md
@@ -0,0 +1,35 @@
+# Reviewing contributions {#chap-reviewing-contributions}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Package updates {#reviewing-contributions-package-updates}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## New packages {#reviewing-contributions-new-packages}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Module updates {#reviewing-contributions-module-updates}
+
+This section has been moved to [nixos/README.md](https://github.com/NixOS/nixpkgs/blob/master/nixos/README.md).
+
+## New modules {#reviewing-contributions-new-modules}
+
+This section has been moved to [nixos/README.md](https://github.com/NixOS/nixpkgs/blob/master/nixos/README.md).
+
+## Individual maintainer list {#reviewing-contributions-individual-maintainer-list}
+
+This section has been moved to [maintainers/README.md](https://github.com/NixOS/nixpkgs/blob/master/maintainers/README.md).
+
+## Maintainer teams {#reviewing-contributions-maintainer-teams}
+
+This section has been moved to [maintainers/README.md](https://github.com/NixOS/nixpkgs/blob/master/maintainers/README.md).
+
+## Other submissions {#reviewing-contributions-other-submissions}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Merging pull requests {#reviewing-contributions--merging-pull-requests}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
diff --git a/nixpkgs/doc/contributing/submitting-changes.chapter.md b/nixpkgs/doc/contributing/submitting-changes.chapter.md
new file mode 100644
index 000000000000..d47a5359779e
--- /dev/null
+++ b/nixpkgs/doc/contributing/submitting-changes.chapter.md
@@ -0,0 +1,88 @@
+# Submitting changes {#chap-submitting-changes}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Submitting changes {#submitting-changes-submitting-changes}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Submitting security fixes {#submitting-changes-submitting-security-fixes}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Deprecating/removing packages {#submitting-changes-deprecating-packages}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+### Steps to remove a package from Nixpkgs {#steps-to-remove-a-package-from-nixpkgs}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Pull Request Template {#submitting-changes-pull-request-template}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Tested using sandboxing {#submitting-changes-tested-with-sandbox}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Built on platform(s) {#submitting-changes-platform-diversity}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Tested via one or more NixOS test(s) if existing and applicable for the change (look inside nixos/tests) {#submitting-changes-nixos-tests}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Tested compilation of all pkgs that depend on this change using `nixpkgs-review` {#submitting-changes-tested-compilation}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Tested execution of all binary files (usually in `./result/bin/`) {#submitting-changes-tested-execution}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Meets Nixpkgs contribution standards {#submitting-changes-contribution-standards}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Hotfixing pull requests {#submitting-changes-hotfixing-pull-requests}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+## Commit policy {#submitting-changes-commit-policy}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+### Branches {#submitting-changes-branches}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Master branch {#submitting-changes-master-branch}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Staging branch {#submitting-changes-staging-branch}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Staging-next branch {#submitting-changes-staging-next-branch}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Stable release branches {#submitting-changes-stable-release-branches}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Automatically backporting a Pull Request {#submitting-changes-stable-release-branches-automatic-backports}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Manually backporting changes {#submitting-changes-stable-release-branches-manual-backports}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+#### Acceptable backport criteria {#acceptable-backport-criteria}
+
+This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
diff --git a/nixpkgs/doc/contributing/vulnerability-roundup.chapter.md b/nixpkgs/doc/contributing/vulnerability-roundup.chapter.md
new file mode 100644
index 000000000000..0880fecea982
--- /dev/null
+++ b/nixpkgs/doc/contributing/vulnerability-roundup.chapter.md
@@ -0,0 +1,11 @@
+# Vulnerability Roundup {#chap-vulnerability-roundup}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Issues {#vulnerability-roundup-issues}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
+
+## Triaging and Fixing {#vulnerability-roundup-triaging-and-fixing}
+
+This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
diff --git a/nixpkgs/doc/default.nix b/nixpkgs/doc/default.nix
new file mode 100644
index 000000000000..76aba1a03eaf
--- /dev/null
+++ b/nixpkgs/doc/default.nix
@@ -0,0 +1,176 @@
+{ pkgs ? (import ./.. { }), nixpkgs ? { }}:
+let
+  inherit (pkgs) lib;
+  inherit (lib) hasPrefix removePrefix;
+
+  common = import ./common.nix;
+
+  lib-docs = import ./doc-support/lib-function-docs.nix {
+    inherit pkgs nixpkgs;
+    libsets = [
+      { name = "asserts"; description = "assertion functions"; }
+      { name = "attrsets"; description = "attribute set functions"; }
+      { name = "strings"; description = "string manipulation functions"; }
+      { name = "versions"; description = "version string functions"; }
+      { name = "trivial"; description = "miscellaneous functions"; }
+      { name = "fixedPoints"; baseName = "fixed-points"; description = "explicit recursion functions"; }
+      { name = "lists"; description = "list manipulation functions"; }
+      { name = "debug"; description = "debugging functions"; }
+      { name = "options"; description = "NixOS / nixpkgs option handling"; }
+      { name = "path"; description = "path functions"; }
+      { name = "filesystem"; description = "filesystem functions"; }
+      { name = "fileset"; description = "file set functions"; }
+      { name = "sources"; description = "source filtering functions"; }
+      { name = "cli"; description = "command-line serialization functions"; }
+      { name = "gvariant"; description = "GVariant formatted string serialization functions"; }
+      { name = "customisation"; description = "Functions to customise (derivation-related) functions, derivatons, or attribute sets"; }
+      { name = "meta"; description = "functions for derivation metadata"; }
+      { name = "derivations"; description = "miscellaneous derivation-specific functions"; }
+    ];
+  };
+
+  epub = pkgs.runCommand "manual.epub" {
+    nativeBuildInputs = with pkgs; [ libxslt zip ];
+
+    epub = ''
+      <book xmlns="http://docbook.org/ns/docbook"
+            xmlns:xlink="http://www.w3.org/1999/xlink"
+            version="5.0"
+            xml:id="nixpkgs-manual">
+        <info>
+          <title>Nixpkgs Manual</title>
+          <subtitle>Version ${pkgs.lib.version}</subtitle>
+        </info>
+        <chapter>
+          <title>Temporarily unavailable</title>
+          <para>
+            The Nixpkgs manual is currently not available in EPUB format,
+            please use the <link xlink:href="https://nixos.org/nixpkgs/manual">HTML manual</link>
+            instead.
+          </para>
+          <para>
+            If you've used the EPUB manual in the past and it has been useful to you, please
+            <link xlink:href="https://github.com/NixOS/nixpkgs/issues/237234">let us know</link>.
+          </para>
+        </chapter>
+      </book>
+    '';
+
+    passAsFile = [ "epub" ];
+  } ''
+    mkdir scratch
+    xsltproc \
+      --param chapter.autolabel 0 \
+      --nonet \
+      --output scratch/ \
+      ${pkgs.docbook_xsl_ns}/xml/xsl/docbook/epub/docbook.xsl \
+      $epubPath
+
+    echo "application/epub+zip" > mimetype
+    zip -0Xq "$out" mimetype
+    cd scratch && zip -Xr9D "$out" *
+  '';
+
+  # NB: This file describes the Nixpkgs manual, which happens to use module
+  #     docs infra originally developed for NixOS.
+  optionsDoc = pkgs.nixosOptionsDoc {
+    inherit (pkgs.lib.evalModules {
+      modules = [ ../pkgs/top-level/config.nix ];
+      class = "nixpkgsConfig";
+    }) options;
+    documentType = "none";
+    transformOptions = opt:
+      opt // {
+        declarations =
+          map
+            (decl:
+              if hasPrefix (toString ../..) (toString decl)
+              then
+                let subpath = removePrefix "/" (removePrefix (toString ../.) (toString decl));
+                in { url = "https://github.com/NixOS/nixpkgs/blob/master/${subpath}"; name = subpath; }
+              else decl)
+            opt.declarations;
+        };
+  };
+in pkgs.stdenv.mkDerivation {
+  name = "nixpkgs-manual";
+
+  nativeBuildInputs = with pkgs; [
+    nixos-render-docs
+  ];
+
+  src = ./.;
+
+  postPatch = ''
+    ln -s ${optionsDoc.optionsJSON}/share/doc/nixos/options.json ./config-options.json
+  '';
+
+  buildPhase = ''
+    cat \
+      ./functions/library.md.in \
+      ${lib-docs}/index.md \
+      > ./functions/library.md
+    substitute ./manual.md.in ./manual.md \
+      --replace '@MANUAL_VERSION@' '${pkgs.lib.version}'
+
+    mkdir -p out/media
+
+    mkdir -p out/highlightjs
+    cp -t out/highlightjs \
+      ${pkgs.documentation-highlighter}/highlight.pack.js \
+      ${pkgs.documentation-highlighter}/LICENSE \
+      ${pkgs.documentation-highlighter}/mono-blue.css \
+      ${pkgs.documentation-highlighter}/loader.js
+
+    cp -t out ./style.css ./anchor.min.js ./anchor-use.js
+
+    nixos-render-docs manual html \
+      --manpage-urls ./manpage-urls.json \
+      --revision ${pkgs.lib.trivial.revisionWithDefault (pkgs.rev or "master")} \
+      --stylesheet style.css \
+      --stylesheet highlightjs/mono-blue.css \
+      --script ./highlightjs/highlight.pack.js \
+      --script ./highlightjs/loader.js \
+      --script ./anchor.min.js \
+      --script ./anchor-use.js \
+      --toc-depth 1 \
+      --section-toc-depth 1 \
+      manual.md \
+      out/index.html
+  '';
+
+  installPhase = ''
+    dest="$out/${common.outputPath}"
+    mkdir -p "$(dirname "$dest")"
+    mv out "$dest"
+    mv "$dest/index.html" "$dest/${common.indexPath}"
+
+    cp ${epub} "$dest/nixpkgs-manual.epub"
+
+    mkdir -p $out/nix-support/
+    echo "doc manual $dest ${common.indexPath}" >> $out/nix-support/hydra-build-products
+    echo "doc manual $dest nixpkgs-manual.epub" >> $out/nix-support/hydra-build-products
+  '';
+
+  passthru.tests.manpage-urls = with pkgs; testers.invalidateFetcherByDrvHash
+    ({ name ? "manual_check-manpage-urls"
+     , script
+     , urlsFile
+     }: runCommand name {
+      nativeBuildInputs = [
+        cacert
+        (python3.withPackages (p: with p; [
+          aiohttp
+          rich
+          structlog
+        ]))
+      ];
+      outputHash = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=";  # Empty output
+    } ''
+      python3 ${script} ${urlsFile}
+      touch $out
+    '') {
+      script = ./tests/manpage-urls.py;
+      urlsFile = ./manpage-urls.json;
+    };
+}
diff --git a/nixpkgs/doc/development.md b/nixpkgs/doc/development.md
new file mode 100644
index 000000000000..0c092befca24
--- /dev/null
+++ b/nixpkgs/doc/development.md
@@ -0,0 +1,10 @@
+# Development of Nixpkgs {#part-development}
+
+This section shows you how Nixpkgs is developed, how you can interact with the contributors, and how to keep up with the latest updates.
+If you are interested in contributing yourself, see [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
+
+<!-- In the future this section should also include: How to test pull requests, how to know if pull requests are available in channels, etc. -->
+
+```{=include=} chapters
+development/opening-issues.chapter.md
+```
diff --git a/nixpkgs/doc/development/opening-issues.chapter.md b/nixpkgs/doc/development/opening-issues.chapter.md
new file mode 100644
index 000000000000..2b82efae593a
--- /dev/null
+++ b/nixpkgs/doc/development/opening-issues.chapter.md
@@ -0,0 +1,7 @@
+# Opening issues {#sec-opening-issues}
+
+* Make sure you have a [GitHub account](https://github.com/signup/free)
+* Make sure there is no open issue on the topic
+* [Submit a new issue](https://github.com/NixOS/nixpkgs/issues/new/choose) by choosing the kind of topic and filling out the template
+
+<!-- In the future this section could also include more detailed information on the issue templates -->
diff --git a/nixpkgs/doc/doc-support/lib-function-docs.nix b/nixpkgs/doc/doc-support/lib-function-docs.nix
new file mode 100644
index 000000000000..5faa99b3e89e
--- /dev/null
+++ b/nixpkgs/doc/doc-support/lib-function-docs.nix
@@ -0,0 +1,41 @@
+# Generates the documentation for library functions via nixdoc.
+
+{ pkgs, nixpkgs, libsets }:
+
+with pkgs;
+
+let
+  locationsJSON = import ./lib-function-locations.nix { inherit pkgs nixpkgs libsets; };
+in
+stdenv.mkDerivation {
+  name = "nixpkgs-lib-docs";
+  src = ../../lib;
+
+  buildInputs = [ nixdoc ];
+  installPhase = ''
+    function docgen {
+      name=$1
+      baseName=$2
+      description=$3
+      # TODO: wrap lib.$name in <literal>, make nixdoc not escape it
+      if [[ -e "../lib/$baseName.nix" ]]; then
+        nixdoc -c "$name" -d "lib.$name: $description" -l ${locationsJSON} -f "$baseName.nix" > "$out/$name.md"
+      else
+        nixdoc -c "$name" -d "lib.$name: $description" -l ${locationsJSON} -f "$baseName/default.nix" > "$out/$name.md"
+      fi
+      echo "$out/$name.md" >> "$out/index.md"
+    }
+
+    mkdir -p "$out"
+
+    cat > "$out/index.md" << 'EOF'
+    ```{=include=} sections auto-id-prefix=auto-generated
+    EOF
+
+    ${lib.concatMapStrings ({ name, baseName ? name, description }: ''
+      docgen ${name} ${baseName} ${lib.escapeShellArg description}
+    '') libsets}
+
+    echo '```' >> "$out/index.md"
+  '';
+}
diff --git a/nixpkgs/doc/doc-support/lib-function-locations.nix b/nixpkgs/doc/doc-support/lib-function-locations.nix
new file mode 100644
index 000000000000..e6794617fdd8
--- /dev/null
+++ b/nixpkgs/doc/doc-support/lib-function-locations.nix
@@ -0,0 +1,75 @@
+{ pkgs, nixpkgs ? { }, libsets }:
+let
+  revision = pkgs.lib.trivial.revisionWithDefault (nixpkgs.rev or "master");
+
+  libDefPos = prefix: set:
+    builtins.concatMap
+      (name: [{
+        name = builtins.concatStringsSep "." (prefix ++ [name]);
+        location = builtins.unsafeGetAttrPos name set;
+      }] ++ nixpkgsLib.optionals
+        (builtins.length prefix == 0 && builtins.isAttrs set.${name})
+        (libDefPos (prefix ++ [name]) set.${name})
+      ) (builtins.attrNames set);
+
+  libset = toplib:
+    builtins.map
+      (subsetname: {
+        subsetname = subsetname;
+        functions = libDefPos [] toplib.${subsetname};
+      })
+      (builtins.map (x: x.name) libsets);
+
+  nixpkgsLib = pkgs.lib;
+
+  flattenedLibSubset = { subsetname, functions }:
+  builtins.map
+    (fn: {
+      name = "lib.${subsetname}.${fn.name}";
+      value = fn.location;
+    })
+    functions;
+
+  locatedlibsets = libs: builtins.map flattenedLibSubset (libset libs);
+  removeFilenamePrefix = prefix: filename:
+    let
+      prefixLen = (builtins.stringLength prefix) + 1; # +1 to remove the leading /
+      filenameLen = builtins.stringLength filename;
+      substr = builtins.substring prefixLen filenameLen filename;
+    in substr;
+
+  removeNixpkgs = removeFilenamePrefix (builtins.toString pkgs.path);
+
+  liblocations =
+    builtins.filter
+      (elem: elem.value != null)
+      (nixpkgsLib.lists.flatten
+        (locatedlibsets nixpkgsLib));
+
+  fnLocationRelative = { name, value }:
+    {
+      inherit name;
+      value = value // { file = removeNixpkgs value.file; };
+    };
+
+  relativeLocs = (builtins.map fnLocationRelative liblocations);
+  sanitizeId = builtins.replaceStrings
+    [ "'"      ]
+    [ "-prime" ];
+
+  urlPrefix = "https://github.com/NixOS/nixpkgs/blob/${revision}";
+  jsonLocs = builtins.listToAttrs
+    (builtins.map
+      ({ name, value }: {
+        name = sanitizeId name;
+        value =
+          let
+            text = "${value.file}:${builtins.toString value.line}";
+            target = "${urlPrefix}/${value.file}#L${builtins.toString value.line}";
+          in
+            "[${text}](${target}) in `<nixpkgs>`";
+      })
+    relativeLocs);
+
+in
+pkgs.writeText "locations.json" (builtins.toJSON jsonLocs)
diff --git a/nixpkgs/doc/functions.md b/nixpkgs/doc/functions.md
new file mode 100644
index 000000000000..09033c9e3c19
--- /dev/null
+++ b/nixpkgs/doc/functions.md
@@ -0,0 +1,11 @@
+# Functions reference {#chap-functions}
+
+The nixpkgs repository has several utility functions to manipulate Nix expressions.
+
+```{=include=} sections
+functions/library.md
+functions/generators.section.md
+functions/debug.section.md
+functions/prefer-remote-fetch.section.md
+functions/nix-gitignore.section.md
+```
diff --git a/nixpkgs/doc/functions/debug.section.md b/nixpkgs/doc/functions/debug.section.md
new file mode 100644
index 000000000000..b2d8589431ab
--- /dev/null
+++ b/nixpkgs/doc/functions/debug.section.md
@@ -0,0 +1,5 @@
+# Debugging Nix Expressions {#sec-debug}
+
+Nix is a unityped, dynamic language; this means every value can potentially appear anywhere. Since it is also non-strict, evaluation order and what ultimately is evaluated might surprise you. Therefore it is important to be able to debug Nix expressions.
+
+In the `lib/debug.nix` file you will find a number of functions that help (pretty-)printing values while evaluation is running. You can even specify how deep these values should be printed recursively, and transform them on the fly. Please consult the docstrings in `lib/debug.nix` for usage information.
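+
+For example, `lib.debug.traceVal` prints a value on standard error while evaluation is running and then returns it unchanged, so it can be wrapped around an existing expression (a minimal sketch):
+
+```nix
+let
+  lib = (import <nixpkgs> {}).lib;
+in
+# prints [ "x" "y" ] as a trace message, then evaluates to that same list
+lib.debug.traceVal (builtins.attrNames { x = 1; y = 2; })
+```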
diff --git a/nixpkgs/doc/functions/generators.section.md b/nixpkgs/doc/functions/generators.section.md
new file mode 100644
index 000000000000..dbfc302a3abf
--- /dev/null
+++ b/nixpkgs/doc/functions/generators.section.md
@@ -0,0 +1,57 @@
+# Generators {#sec-generators}
+Generators are functions that create file formats from Nix data structures, e.g. for configuration files. There are generators available for `INI`, `JSON` and `YAML`.
+
+All generators follow a similar call interface: `generatorName configFunctions data`, where `configFunctions` is an attrset of user-defined functions that format nested parts of the content. They each have common defaults, so often they do not need to be set manually. An example is `mkSectionName ? (name: libStr.escape [ "[" "]" ] name)` from the `INI` generator. It receives the name of a section and sanitizes it. The default `mkSectionName` escapes `[` and `]` with a backslash.
+
+Generators can be fine-tuned to produce exactly the file format required by your application/service. One example is an INI-file format which uses `:` as the separator, the strings `"yes"`/`"no"` as boolean values, and requires all string values to be quoted:
+
+```nix
+let
+  inherit (lib) generators isString;
+
+  customToINI = generators.toINI {
+    # specifies how to format a key/value pair
+    mkKeyValue = generators.mkKeyValueDefault {
+      # specifies the generated string for a subset of nix values
+      mkValueString = v:
+             if v == true then ''"yes"''
+        else if v == false then ''"no"''
+        else if isString v then ''"${v}"''
+        # and delegates all other values to the default generator
+        else generators.mkValueStringDefault {} v;
+    } ":";
+  };
+
+# the INI file can now be given as plain old nix values
+in customToINI {
+  main = {
+    pushinfo = true;
+    autopush = false;
+    host = "localhost";
+    port = 42;
+  };
+  mergetool = {
+    merge = "diff3";
+  };
+}
+```
+
+This will produce the following INI file as a Nix string:
+
+```INI
+[main]
+autopush:"no"
+host:"localhost"
+port:42
+pushinfo:"yes"
+str\:ange:"very::strange"
+
+[mergetool]
+merge:"diff3"
+```
+
+::: {.note}
+Nix store paths can be converted to strings by interpolating a derivation like so: `"${drv}"`.
+:::
+
+Detailed documentation for each generator can be found in `lib/generators.nix`.
diff --git a/nixpkgs/doc/functions/library.md.in b/nixpkgs/doc/functions/library.md.in
new file mode 100644
index 000000000000..e17de86feb8a
--- /dev/null
+++ b/nixpkgs/doc/functions/library.md.in
@@ -0,0 +1,5 @@
+# Nixpkgs Library Functions {#sec-functions-library}
+
+Nixpkgs provides a standard library at `pkgs.lib`, or through `import <nixpkgs/lib>`.
+
+<!-- nixdoc-generated documentation must be appended here during build! -->
diff --git a/nixpkgs/doc/functions/library/.gitkeep b/nixpkgs/doc/functions/library/.gitkeep
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/nixpkgs/doc/functions/library/.gitkeep
diff --git a/nixpkgs/doc/functions/nix-gitignore.section.md b/nixpkgs/doc/functions/nix-gitignore.section.md
new file mode 100644
index 000000000000..8532ab68ac04
--- /dev/null
+++ b/nixpkgs/doc/functions/nix-gitignore.section.md
@@ -0,0 +1,56 @@
+# pkgs.nix-gitignore {#sec-pkgs-nix-gitignore}
+
+`pkgs.nix-gitignore` is a function that acts similarly to `builtins.filterSource` but also allows filtering with the help of the gitignore format.
+
+## Usage {#sec-pkgs-nix-gitignore-usage}
+
+`pkgs.nix-gitignore` exports a number of functions, but you'll most likely need either `gitignoreSource` or `gitignoreSourcePure`. As their first argument, they both accept either a file with gitignore lines, a string with gitignore lines, or a list of either of the two. These will be concatenated into a single big string.
+
+```nix
+{ pkgs ? import <nixpkgs> {} }: {
+
+ src = nix-gitignore.gitignoreSource [] ./source;
+     # Simplest version
+
+ src = nix-gitignore.gitignoreSource "supplemental-ignores\n" ./source;
+     # This one reads the ./source/.gitignore and concats the auxiliary ignores
+
+ src = nix-gitignore.gitignoreSourcePure "ignore-this\nignore-that\n" ./source;
+     # Use this string as gitignore, don't read ./source/.gitignore.
+
+ src = nix-gitignore.gitignoreSourcePure ["ignore-this\nignore-that\n" ~/.gitignore] ./source;
+     # It also accepts a list (of strings and paths) that will be concatenated
+     # once the paths are turned to strings via readFile.
+}
+```
+
+These functions are derived from the `Filter` functions by setting the first filter argument to `(_: _: true)`:
+
+```nix
+{
+  gitignoreSourcePure = gitignoreFilterSourcePure (_: _: true);
+  gitignoreSource = gitignoreFilterSource (_: _: true);
+}
+```
+
+Those filter functions accept the same arguments the `builtins.filterSource` function would pass to its filters, thus `fn: gitignoreFilterSourcePure fn ""` should be extensionally equivalent to `filterSource`. The file is blacklisted if it's blacklisted by either your filter or the gitignoreFilter.
+
+If you want to make your own filter from scratch, you may use
+
+```nix
+{
+  gitignoreFilter = ign: root: filterPattern (gitignoreToPatterns ign) root;
+}
+```
+
+## gitignore files in subdirectories {#sec-pkgs-nix-gitignore-usage-recursive}
+
+If you wish to use a filter that would search for .gitignore files in subdirectories, just like git does by default, use this function:
+
+```nix
+{
+  # gitignoreFilterRecursiveSource = filter: patterns: root:
+  # OR
+  gitignoreRecursiveSource = gitignoreFilterSourcePure (_: _: true);
+}
+```
diff --git a/nixpkgs/doc/functions/prefer-remote-fetch.section.md b/nixpkgs/doc/functions/prefer-remote-fetch.section.md
new file mode 100644
index 000000000000..8760c100224a
--- /dev/null
+++ b/nixpkgs/doc/functions/prefer-remote-fetch.section.md
@@ -0,0 +1,17 @@
+# prefer-remote-fetch overlay {#sec-prefer-remote-fetch}
+
+`prefer-remote-fetch` is an overlay that downloads sources on the remote builder. This is useful when the evaluating machine has a slow upload while the builder can fetch faster directly from the source. To use it, put the following snippet as a new overlay:
+
+```nix
+self: super:
+  (super.prefer-remote-fetch self super)
+```
+
+A full configuration example that sets up the overlay for your own account could look like this:
+
+```ShellSession
+$ mkdir ~/.config/nixpkgs/overlays/
+$ cat > ~/.config/nixpkgs/overlays/prefer-remote-fetch.nix <<EOF
+  self: super: super.prefer-remote-fetch self super
+EOF
+```
diff --git a/nixpkgs/doc/hooks/autoconf.section.md b/nixpkgs/doc/hooks/autoconf.section.md
new file mode 100644
index 000000000000..90e4681ef93f
--- /dev/null
+++ b/nixpkgs/doc/hooks/autoconf.section.md
@@ -0,0 +1,3 @@
+# Autoconf {#setup-hook-autoconf}
+
+The `autoreconfHook` derivation adds `autoreconfPhase`, which runs autoreconf, libtoolize and automake, essentially preparing the configure script in autotools-based builds. Most autotools-based packages come with the configure script pre-generated, but this hook is necessary for a few packages and when you need to patch the package’s configure scripts.
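+
+A typical use is simply to add the hook to `nativeBuildInputs`, for example when a package's `configure.ac` has been patched (a minimal sketch):
+
+```nix
+{
+  # regenerates the configure script before the configure phase runs
+  nativeBuildInputs = [ autoreconfHook ];
+}
+```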
diff --git a/nixpkgs/doc/hooks/automake.section.md b/nixpkgs/doc/hooks/automake.section.md
new file mode 100644
index 000000000000..dd0ff9c0cc09
--- /dev/null
+++ b/nixpkgs/doc/hooks/automake.section.md
@@ -0,0 +1,3 @@
+# Automake {#setup-hook-automake}
+
+Adds the `share/aclocal` subdirectory of each build input to the `ACLOCAL_PATH` environment variable.
diff --git a/nixpkgs/doc/hooks/autopatchelf.section.md b/nixpkgs/doc/hooks/autopatchelf.section.md
new file mode 100644
index 000000000000..995204b90219
--- /dev/null
+++ b/nixpkgs/doc/hooks/autopatchelf.section.md
@@ -0,0 +1,11 @@
+# autoPatchelfHook {#setup-hook-autopatchelfhook}
+
+This is a special setup hook which helps in packaging proprietary software in that it automatically tries to find missing shared library dependencies of ELF files based on the given `buildInputs` and `nativeBuildInputs`.
+
+You can also specify a `runtimeDependencies` variable which lists dependencies to be unconditionally added to the rpath of all executables. This is useful for programs that use `dlopen(3)` to load libraries at runtime.
+
+In certain situations you may want to run the main command (`autoPatchelf`) of the setup hook on a file or a set of directories instead of unconditionally patching all outputs. This can be done by setting the `dontAutoPatchelf` environment variable to a non-empty value.
+
+By default `autoPatchelf` will fail as soon as any ELF file requires a dependency which cannot be resolved via the given build inputs. In some situations you might prefer to just leave missing dependencies unpatched and continue to patch the rest. This can be achieved by setting the `autoPatchelfIgnoreMissingDeps` environment variable to a non-empty value. `autoPatchelfIgnoreMissingDeps` can be set to a list like `autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" "libcudart.so.1" ];` or to `[ "*" ]` to ignore all missing dependencies.
+
+The `autoPatchelf` command also recognizes a `--no-recurse` command line flag, which prevents it from recursing into subdirectories.
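+
+A sketch of how these variables are commonly combined when packaging a prebuilt binary; the dependency packages shown (`zlib`, `curl`) are only placeholders:
+
+```nix
+{
+  nativeBuildInputs = [ autoPatchelfHook ];
+
+  # shared libraries in these inputs are used to resolve missing dependencies
+  buildInputs = [ zlib ];
+
+  # unconditionally added to the rpath of all executables
+  runtimeDependencies = [ curl ];
+
+  # leave these dependencies unpatched instead of failing the build
+  autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" ];
+}
+```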
diff --git a/nixpkgs/doc/hooks/bmake.section.md b/nixpkgs/doc/hooks/bmake.section.md
new file mode 100644
index 000000000000..6b40ac13e8b9
--- /dev/null
+++ b/nixpkgs/doc/hooks/bmake.section.md
@@ -0,0 +1,7 @@
+# bmake {#bmake-hook}
+
+[bmake](https://www.crufty.net/help/sjg/bmake.html) is the portable variant of the NetBSD make utility.
+
+In Nixpkgs, `bmake` comes with a hook that overrides the default build, check,
+install and dist phases.
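+
+To use the hook, it should be enough to add `bmake` to `nativeBuildInputs` (a minimal sketch):
+
+```nix
+{
+  # the bmake package ships the setup hook that overrides the default phases
+  nativeBuildInputs = [ bmake ];
+}
+```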
diff --git a/nixpkgs/doc/hooks/breakpoint.section.md b/nixpkgs/doc/hooks/breakpoint.section.md
new file mode 100644
index 000000000000..b7f1979586de
--- /dev/null
+++ b/nixpkgs/doc/hooks/breakpoint.section.md
@@ -0,0 +1,17 @@
+# breakpointHook {#breakpointhook}
+
+This hook will make a build pause instead of stopping when a failure happens. It prevents Nix from cleaning up the build environment immediately and allows the user to attach to the build environment using the `cntr` command. Upon build error it will print instructions on how to use `cntr`, which can be used to enter the environment for debugging. Installing `cntr` and running the command will provide shell access to the build sandbox of the failed build. The sandboxed filesystem is mounted at `/var/lib/cntr`. All commands and files of the system are still accessible within the shell. To execute commands from the sandbox, use the `cntr exec` subcommand. `cntr` is only supported on Linux-based platforms. To use it, first add `cntr` to your `environment.systemPackages` on NixOS, or alternatively to the root user on non-NixOS systems. Then, in the package that is supposed to be inspected, add `breakpointHook` to `nativeBuildInputs`.
+
+```nix
+{
+  nativeBuildInputs = [ breakpointHook ];
+}
+```
+
+When a build failure happens there will be an instruction printed that shows how to attach with `cntr` to the build sandbox.
+
+::: {.note}
+Caution with remote builds
+
+This won’t work with remote builds as the build environment is on a different machine and can’t be accessed by `cntr`. Remote builds can be turned off by setting `--option builders ''` for `nix-build` or `--builders ''` for `nix build`.
+:::
diff --git a/nixpkgs/doc/hooks/cmake.section.md b/nixpkgs/doc/hooks/cmake.section.md
new file mode 100644
index 000000000000..b5dc5a914434
--- /dev/null
+++ b/nixpkgs/doc/hooks/cmake.section.md
@@ -0,0 +1,3 @@
+# cmake {#cmake}
+
+Overrides the default configure phase to run the CMake command. By default, we use the Make generator of CMake. In addition, dependencies are added automatically to `CMAKE_PREFIX_PATH` so that packages are correctly detected by CMake. Some additional flags are passed in to give similar behavior to configure-based packages. You can disable this hook’s behavior by setting `configurePhase` to a custom value, or by setting `dontUseCmakeConfigure`. `cmakeFlags` controls flags passed only to CMake. By default, parallel building is enabled as CMake supports parallel building almost everywhere. When Ninja is also in use, CMake will detect that and use the ninja generator.
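+
+As a rough sketch, a package using this hook might look like the following; the flag shown is hypothetical and depends on the options defined by the project's `CMakeLists.txt`:
+
+```nix
+{
+  nativeBuildInputs = [ cmake ];
+
+  # hypothetical project-specific options, passed only to CMake
+  cmakeFlags = [ "-DENABLE_TESTS=OFF" ];
+}
+```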
diff --git a/nixpkgs/doc/hooks/gdk-pixbuf.section.md b/nixpkgs/doc/hooks/gdk-pixbuf.section.md
new file mode 100644
index 000000000000..cf7203dfc66f
--- /dev/null
+++ b/nixpkgs/doc/hooks/gdk-pixbuf.section.md
@@ -0,0 +1,3 @@
+# gdk-pixbuf {#setup-hook-gdk-pixbuf}
+
+Exports `GDK_PIXBUF_MODULE_FILE` environment variable to the builder. Add librsvg package to `buildInputs` to get svg support. See also the [setup hook description in GNOME platform docs](#ssec-gnome-hooks-gdk-pixbuf).
diff --git a/nixpkgs/doc/hooks/ghc.section.md b/nixpkgs/doc/hooks/ghc.section.md
new file mode 100644
index 000000000000..ac054b954a92
--- /dev/null
+++ b/nixpkgs/doc/hooks/ghc.section.md
@@ -0,0 +1,3 @@
+# GHC {#ghc}
+
+Creates a temporary package database and registers every Haskell build input in it (TODO: how?).
diff --git a/nixpkgs/doc/hooks/gnome.section.md b/nixpkgs/doc/hooks/gnome.section.md
new file mode 100644
index 000000000000..b10e80802027
--- /dev/null
+++ b/nixpkgs/doc/hooks/gnome.section.md
@@ -0,0 +1,3 @@
+# GNOME platform {#gnome-platform}
+
+Hooks related to GNOME platform and related libraries like GLib, GTK and GStreamer are described in [](#sec-language-gnome).
diff --git a/nixpkgs/doc/hooks/index.md b/nixpkgs/doc/hooks/index.md
new file mode 100644
index 000000000000..1534ef85ccb9
--- /dev/null
+++ b/nixpkgs/doc/hooks/index.md
@@ -0,0 +1,35 @@
+# Hooks reference {#chap-hooks}
+
+Nixpkgs has several hook packages that augment the stdenv phases.
+
+The stdenv built-in hooks are documented in [](#ssec-setup-hooks).
+
+```{=include=} sections
+autoconf.section.md
+automake.section.md
+autopatchelf.section.md
+bmake.section.md
+breakpoint.section.md
+cmake.section.md
+gdk-pixbuf.section.md
+ghc.section.md
+gnome.section.md
+installShellFiles.section.md
+libiconv.section.md
+libxml2.section.md
+meson.section.md
+mpi-check-hook.section.md
+ninja.section.md
+patch-rc-path-hooks.section.md
+perl.section.md
+pkg-config.section.md
+postgresql-test-hook.section.md
+python.section.md
+scons.section.md
+tetex-tex-live.section.md
+unzip.section.md
+validatePkgConfig.section.md
+waf.section.md
+zig.section.md
+xcbuild.section.md
+```
diff --git a/nixpkgs/doc/hooks/installShellFiles.section.md b/nixpkgs/doc/hooks/installShellFiles.section.md
new file mode 100644
index 000000000000..834c6a37df15
--- /dev/null
+++ b/nixpkgs/doc/hooks/installShellFiles.section.md
@@ -0,0 +1,27 @@
+# `installShellFiles` {#installshellfiles}
+
+This hook helps with installing manpages and shell completion files. It exposes 2 shell functions `installManPage` and `installShellCompletion` that can be used from your `postInstall` hook.
+
+The `installManPage` function takes one or more paths to manpages to install. The manpages must have a section suffix, and may optionally be compressed (with `.gz` suffix). This function will place them into the correct `share/man/man<section>/` directory, in [`outputMan`](#outputman).
+
+The `installShellCompletion` function takes one or more paths to shell completion files. By default it will autodetect the shell type from the completion file extension, but you may also specify it by passing one of `--bash`, `--fish`, or `--zsh`. These flags apply to all paths listed after them (up until another shell flag is given). Each path may also be given a custom installation name by passing the flag `--name NAME` before the path. If this flag is not provided, zsh completions will be renamed automatically such that `foobar.zsh` becomes `_foobar`. A root name may be provided for all paths using the flag `--cmd NAME`; this synthesizes the appropriate name depending on the shell (e.g. `--cmd foo` will synthesize the name `foo.bash` for bash and `_foo` for zsh). The path may also be a fifo or named fd (such as produced by `<(cmd)`), in which case the shell and name must be provided.
+
+```nix
+{
+  nativeBuildInputs = [ installShellFiles ];
+  postInstall = ''
+    installManPage doc/foobar.1 doc/barfoo.3
+    # explicit behavior
+    installShellCompletion --bash --name foobar.bash share/completions.bash
+    installShellCompletion --fish --name foobar.fish share/completions.fish
+    installShellCompletion --zsh --name _foobar share/completions.zsh
+    # implicit behavior
+    installShellCompletion share/completions/foobar.{bash,fish,zsh}
+    # using named fd
+    installShellCompletion --cmd foobar \
+      --bash <($out/bin/foobar --bash-completion) \
+      --fish <($out/bin/foobar --fish-completion) \
+      --zsh <($out/bin/foobar --zsh-completion)
+  '';
+}
+```
diff --git a/nixpkgs/doc/hooks/libiconv.section.md b/nixpkgs/doc/hooks/libiconv.section.md
new file mode 100644
index 000000000000..0ffa6d09b0a8
--- /dev/null
+++ b/nixpkgs/doc/hooks/libiconv.section.md
@@ -0,0 +1,3 @@
+# libiconv, libintl {#libiconv-libintl}
+
+A few libraries automatically add their library to `NIX_LDFLAGS`, making their symbols automatically available to the linker. This includes libiconv and libintl (gettext). This is done to provide compatibility between GNU/Linux, where libiconv and libintl are bundled in, and other systems where that might not be the case. Sometimes, this behavior is not desired. To disable this behavior, set `dontAddExtraLibs`.
diff --git a/nixpkgs/doc/hooks/libxml2.section.md b/nixpkgs/doc/hooks/libxml2.section.md
new file mode 100644
index 000000000000..df387fb5e222
--- /dev/null
+++ b/nixpkgs/doc/hooks/libxml2.section.md
@@ -0,0 +1,3 @@
+# libxml2 {#setup-hook-libxml2}
+
+Adds every file named `catalog.xml` found under the `xml/dtd` and `xml/xsl` subdirectories of each build input to the `XML_CATALOG_FILES` environment variable.
diff --git a/nixpkgs/doc/hooks/meson.section.md b/nixpkgs/doc/hooks/meson.section.md
new file mode 100644
index 000000000000..3a7fb5032082
--- /dev/null
+++ b/nixpkgs/doc/hooks/meson.section.md
@@ -0,0 +1,83 @@
+# Meson {#meson}
+
+[Meson](https://mesonbuild.com/) is an open source meta build system meant to be
+fast and user-friendly.
+
+In Nixpkgs, meson comes with a setup hook that overrides the configure, check,
+and install phases.
+
+Being a meta build system, meson needs an accompanying backend. In the context
+of Nixpkgs, the typical companion backend is [Ninja](#ninja), which provides a
+setup hook registering ninja-based build and install phases.
+
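+As a rough sketch, a package built with Meson and Ninja might look like the following; the `mesonFlags` value is hypothetical and depends on the project's `meson_options.txt`:
+
+```nix
+{
+  nativeBuildInputs = [ meson ninja ];
+
+  # hypothetical project-specific options, passed to `meson setup`
+  mesonFlags = [ "-Ddocs=false" ];
+
+  # see the variables documented below
+  mesonBuildType = "release";
+}
+```
+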
+## Variables controlling Meson {#meson-variables-controlling}
+
+### Meson Exclusive Variables {#meson-exclusive-variables}
+
+#### `mesonFlags` {#meson-flags}
+
+Controls the flags passed to `meson setup` during configure phase.
+
+#### `mesonWrapMode` {#meson-wrap-mode}
+
+Which value is passed as
+[`-Dwrap_mode=`](https://mesonbuild.com/Builtin-options.html#core-options)
+to `meson setup` during configure phase. In Nixpkgs the default value is
+`nodownload`, so that no subproject will be downloaded (since network access
+is already disabled during builds in Nixpkgs).
+
+Note: Meson allows pre-population of subprojects that would otherwise be
+downloaded.
+
+#### `mesonBuildType` {#meson-build-type}
+
+Which value is passed as
+[`--buildtype`](https://mesonbuild.com/Builtin-options.html#core-options) to
+`meson setup` during configure phase. In Nixpkgs the default value is `plain`.
+
+#### `mesonAutoFeatures` {#meson-auto-features}
+
+Which value is passed as
+[`-Dauto_features=`](https://mesonbuild.com/Builtin-options.html#core-options)
+to `meson setup` during configure phase. In Nixpkgs the default value is
+`enabled`, meaning that every feature declared as "auto" by the meson scripts
+will be enabled.
+
+#### `mesonCheckFlags` {#meson-check-flags}
+
+Controls the flags passed to `meson test` during check phase.
+
+#### `mesonInstallFlags` {#meson-install-flags}
+
+Controls the flags passed to `meson install` during install phase.
+
+#### `mesonInstallTags` {#meson-install-tags}
+
+A list of installation tags passed to Meson's commandline option
+[`--tags`](https://mesonbuild.com/Installing.html#installation-tags) during
+install phase.
+
+Note: `mesonInstallTags` should be a list of strings, which will be converted
+to a comma-separated string that is passed to `--tags`.
+Example: `mesonInstallTags = [ "emulator" "assembler" ];` will be converted to
+`--tags emulator,assembler`.
+
+#### `dontUseMesonConfigure` {#dont-use-meson-configure}
+
+When set to true, don't use the predefined `mesonConfigurePhase`.
+
+#### `dontUseMesonCheck` {#dont-use-meson-check}
+
+When set to true, don't use the predefined `mesonCheckPhase`.
+
+#### `dontUseMesonInstall` {#dont-use-meson-install}
+
+When set to true, don't use the predefined `mesonInstallPhase`.
+
+### Honored variables {#meson-honored-variables}
+
+The following variables commonly used by `stdenv.mkDerivation` are honored by
+the Meson setup hook.
+
+- `prefixKey`
+- `enableParallelBuilding`
diff --git a/nixpkgs/doc/hooks/mpi-check-hook.section.md b/nixpkgs/doc/hooks/mpi-check-hook.section.md
new file mode 100644
index 000000000000..c182c4cc6195
--- /dev/null
+++ b/nixpkgs/doc/hooks/mpi-check-hook.section.md
@@ -0,0 +1,25 @@
+# mpiCheckPhaseHook {#setup-hook-mpi-check}
+
+This hook can be used to set up a check phase that
+requires running an MPI application. It detects the
+MPI implementation in use and exports
+the necessary environment variables to use
+`mpirun` and `mpiexec` in a Nix sandbox.
+
+
+Example:
+
+```nix
+  { mpiCheckPhaseHook, mpi, openssh, ... }:
+  {
+    # ...
+
+    nativeCheckInputs = [
+      openssh
+      mpiCheckPhaseHook
+    ];
+  }
+```
+
+
diff --git a/nixpkgs/doc/hooks/ninja.section.md b/nixpkgs/doc/hooks/ninja.section.md
new file mode 100644
index 000000000000..bbc948108804
--- /dev/null
+++ b/nixpkgs/doc/hooks/ninja.section.md
@@ -0,0 +1,5 @@
+# ninja {#ninja}
+
+Overrides the build, install, and check phases to run ninja instead of make. You can disable this behavior with the `dontUseNinjaBuild`, `dontUseNinjaInstall`, and `dontUseNinjaCheck` variables, respectively. Parallel building is enabled by default in Ninja.
+
+Note that if the [Meson setup hook](#meson) is also active, Ninja's install and check phases will be disabled in favor of Meson's.
diff --git a/nixpkgs/doc/hooks/patch-rc-path-hooks.section.md b/nixpkgs/doc/hooks/patch-rc-path-hooks.section.md
new file mode 100644
index 000000000000..5c870dc782c2
--- /dev/null
+++ b/nixpkgs/doc/hooks/patch-rc-path-hooks.section.md
@@ -0,0 +1,50 @@
+
+# `patchRcPath` hooks {#sec-patchRcPathHooks}
+
+These hooks provide shell-specific utilities (with the same name as the hook) to patch shell scripts meant to be sourced by software users.
+
+The typical usage is to patch initialisation or [rc](https://unix.stackexchange.com/questions/3467/what-does-rc-in-bashrc-stand-for) scripts inside `$out/bin` or `$out/etc`.
+Such scripts, when being sourced, would insert the binary locations of certain commands into `PATH`, modify other environment variables or run a series of start-up commands.
+When shipped from the upstream, they sometimes use commands that might not be available in the environment they are getting sourced in.
+
+The compatible shells for each hook are:
+
+ - `patchRcPathBash`: [Bash](https://www.gnu.org/software/bash/), [ksh](http://www.kornshell.org/), [zsh](https://www.zsh.org/) and other shells supporting the Bash-like parameter expansions.
+ - `patchRcPathCsh`: Csh scripts, such as those targeting [tcsh](https://www.tcsh.org/).
+ - `patchRcPathFish`: [Fish](https://fishshell.com/) scripts.
+ - `patchRcPathPosix`: POSIX-conformant shells supporting the limited parameter expansions specified by the POSIX standard. The current implementation uses only the parameter expansion `${foo-}`.
+
+For each supported shell, it modifies the script with a `PATH` prefix that is later removed when the script ends.
+It allows nested patching, which guarantees that a patched script may source another patched script.
+
+Syntax to apply the utility to a script:
+
+```sh
+patchRcPath<shell> <file> <PATH-prefix>
+```
+
+Example usage:
+
+Given a package `foo` containing an init script `this-foo.fish` that depends on `coreutils`, `man` and `which`,
+patch the init script for users to source without having the above dependencies in their `PATH`:
+
+```nix
+{ lib, stdenv, patchRcPathFish, coreutils, man, which }:
+stdenv.mkDerivation {
+
+  # ...
+
+  nativeBuildInputs = [
+    patchRcPathFish
+  ];
+
+  postFixup = ''
+    patchRcPathFish $out/bin/this-foo.fish ${lib.makeBinPath [ coreutils man which ]}
+  '';
+}
+```
+
+::: {.note}
+The `patchRcPathCsh` and `patchRcPathPosix` implementations depend on `sed` to do the string processing.
+The others are in vanilla shell and have no third-party dependencies.
+:::
diff --git a/nixpkgs/doc/hooks/perl.section.md b/nixpkgs/doc/hooks/perl.section.md
new file mode 100644
index 000000000000..06942bd3c0e1
--- /dev/null
+++ b/nixpkgs/doc/hooks/perl.section.md
@@ -0,0 +1,3 @@
+# Perl {#setup-hook-perl}
+
+Adds the `lib/site_perl` subdirectory of each build input to the `PERL5LIB` environment variable. For instance, if `buildInputs` contains Perl, then the `lib/site_perl` subdirectory of each input is added to the `PERL5LIB` environment variable.
diff --git a/nixpkgs/doc/hooks/pkg-config.section.md b/nixpkgs/doc/hooks/pkg-config.section.md
new file mode 100644
index 000000000000..c98701cf9c9d
--- /dev/null
+++ b/nixpkgs/doc/hooks/pkg-config.section.md
@@ -0,0 +1,3 @@
+# pkg-config {#setup-hook-pkg-config}
+
+Adds the `lib/pkgconfig` and `share/pkgconfig` subdirectories of each build input to the `PKG_CONFIG_PATH` environment variable.
diff --git a/nixpkgs/doc/hooks/postgresql-test-hook.section.md b/nixpkgs/doc/hooks/postgresql-test-hook.section.md
new file mode 100644
index 000000000000..59d7f7a644c9
--- /dev/null
+++ b/nixpkgs/doc/hooks/postgresql-test-hook.section.md
@@ -0,0 +1,66 @@
+
+# `postgresqlTestHook` {#sec-postgresqlTestHook}
+
+This hook starts a PostgreSQL server during the `checkPhase`. Example:
+
+```nix
+{ stdenv, postgresql, postgresqlTestHook }:
+stdenv.mkDerivation {
+
+  # ...
+
+  nativeCheckInputs = [
+    postgresql
+    postgresqlTestHook
+  ];
+}
+```
+
+If you use a custom `checkPhase`, remember to add the `runHook` calls:
+```nix
+  checkPhase = ''
+    runHook preCheck
+
+    # ... your tests
+
+    runHook postCheck
+  '';
+```
+
+## Variables {#sec-postgresqlTestHook-variables}
+
+The hook logic will read a number of variables and set them to a default value if unset or empty.
+
+Exported variables:
+
+ - `PGDATA`: location of server files.
+ - `PGHOST`: location of UNIX domain socket directory; the default `host` in a connection string.
+ - `PGUSER`: user to create / log in with, default: `test_user`.
+ - `PGDATABASE`: database name, default: `test_db`.
+
+Bash-only variables:
+
+ - `postgresqlTestUserOptions`: SQL options to use when creating the `$PGUSER` role, default: `"LOGIN"`. Example: `"LOGIN SUPERUSER"`
+ - `postgresqlTestSetupSQL`: SQL commands to run as database administrator after startup, default: statements that create `$PGUSER` and `$PGDATABASE`.
+ - `postgresqlTestSetupCommands`: bash commands to run after database start, defaults to running `$postgresqlTestSetupSQL` as database administrator.
+ - `postgresqlEnableTCP`: set to `1` to enable TCP listening. Flaky; not recommended.
+ - `postgresqlStartCommands`: defaults to `pg_ctl start`.
+ - `postgresqlExtraSettings`: Additional configuration to add to `postgresql.conf`
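+
+For example, a derivation might tweak the Bash-only variables like this; the values shown are purely illustrative:
+
+```nix
+{
+  nativeCheckInputs = [ postgresql postgresqlTestHook ];
+
+  # create the test role with extra privileges (illustrative)
+  postgresqlTestUserOptions = "LOGIN SUPERUSER";
+
+  # appended to postgresql.conf (illustrative)
+  postgresqlExtraSettings = ''
+    log_statement = 'all'
+  '';
+}
+```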
+
+## Hooks {#sec-postgresqlTestHook-hooks}
+
+A number of additional hooks are run by `postgresqlTestHook`:
+
+ - `postgresqlTestSetupPost`: run after PostgreSQL has been set up.
+
+## TCP and the Nix sandbox {#sec-postgresqlTestHook-tcp}
+
+`postgresqlEnableTCP` relies on network sandboxing, which is not available on macOS and some custom Nix installations, resulting in flaky tests.
+For this reason, it is disabled by default.
+
+The preferred solution is to make the test suite use a UNIX domain socket connection. This is the default behavior when no `host` connection parameter is provided.
+Some test suites hardcode a value for `host` though, so a patch may be required. If you can upstream the patch, you can make `host` default to the `PGHOST` environment variable when set. Otherwise, you can patch it locally to omit the `host` connection string parameter altogether.
+
+::: {.note}
+The error `libpq: failed (could not receive data from server: Connection refused` is generally an indication that the test suite is trying to connect through TCP.
+:::
diff --git a/nixpkgs/doc/hooks/python.section.md b/nixpkgs/doc/hooks/python.section.md
new file mode 100644
index 000000000000..b7862650f167
--- /dev/null
+++ b/nixpkgs/doc/hooks/python.section.md
@@ -0,0 +1,3 @@
+# Python {#setup-hook-python}
+
+Adds the `python.sitePackages` subdirectory (i.e. `lib/pythonX.Y/site-packages`) of each build input to the `PYTHONPATH` environment variable.
diff --git a/nixpkgs/doc/hooks/scons.section.md b/nixpkgs/doc/hooks/scons.section.md
new file mode 100644
index 000000000000..0a7a7aa023b6
--- /dev/null
+++ b/nixpkgs/doc/hooks/scons.section.md
@@ -0,0 +1,3 @@
+# scons {#scons}
+
+Overrides the build, install, and check phases. This uses the scons build system as a replacement for make. Since scons does not provide a configure phase, everything is managed at build and install time.
diff --git a/nixpkgs/doc/hooks/tetex-tex-live.section.md b/nixpkgs/doc/hooks/tetex-tex-live.section.md
new file mode 100644
index 000000000000..b702971d727c
--- /dev/null
+++ b/nixpkgs/doc/hooks/tetex-tex-live.section.md
@@ -0,0 +1,3 @@
+# teTeX / TeX Live {#tetex-tex-live}
+
+Adds the `share/texmf-nix` subdirectory of each build input to the `TEXINPUTS` environment variable.
diff --git a/nixpkgs/doc/hooks/unzip.section.md b/nixpkgs/doc/hooks/unzip.section.md
new file mode 100644
index 000000000000..5ec67e576a33
--- /dev/null
+++ b/nixpkgs/doc/hooks/unzip.section.md
@@ -0,0 +1,3 @@
+# unzip {#unzip}
+
+This setup hook will allow you to unzip .zip files specified in `$src`. There are many similar packages like `unrar`, `undmg`, etc.
diff --git a/nixpkgs/doc/hooks/validatePkgConfig.section.md b/nixpkgs/doc/hooks/validatePkgConfig.section.md
new file mode 100644
index 000000000000..aa6e0c06c223
--- /dev/null
+++ b/nixpkgs/doc/hooks/validatePkgConfig.section.md
@@ -0,0 +1,3 @@
+# validatePkgConfig {#validatepkgconfig}
+
+The `validatePkgConfig` hook validates all pkg-config (`.pc`) files in a package. This helps catch some common errors in pkg-config files, such as undefined variables.
diff --git a/nixpkgs/doc/hooks/waf.section.md b/nixpkgs/doc/hooks/waf.section.md
new file mode 100644
index 000000000000..fa027d87a94d
--- /dev/null
+++ b/nixpkgs/doc/hooks/waf.section.md
@@ -0,0 +1,58 @@
+# wafHook {#waf-hook}
+
+[Waf](https://waf.io) is a Python-based software building system.
+
+In Nixpkgs, `wafHook` overrides the default configure, build, and install phases.
+
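+A minimal sketch of a package using `wafHook`; the configure flag is hypothetical and depends on the project's `wscript`, and `python3` is commonly added since Waf is Python-based:
+
+```nix
+{
+  nativeBuildInputs = [ wafHook python3 ];
+
+  # hypothetical project-specific flag, passed during the configure phase
+  wafConfigureFlags = [ "--disable-tests" ];
+}
+```
+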
+## Variables controlling wafHook {#waf-hook-variables-controlling}
+
+### `wafHook` Exclusive Variables {#waf-hook-exclusive-variables}
+
+The variables below are exclusive to `wafHook`.
+
+#### `wafPath` {#waf-path}
+
+Location of the `waf` tool. It defaults to `./waf`, to honor software projects that include it directly inside their source trees.
+
+If `wafPath` doesn't exist, `wafHook` will copy the `waf` executable provided by Nixpkgs to that location.
+
+#### `wafFlags` {#waf-flags}
+
+Controls the flags passed to the waf tool during the build and install phases. For settings specific to the build or install phase, use `wafBuildFlags` or `wafInstallFlags` respectively.
+
+#### `dontAddWafCrossFlags` {#dont-add-waf-cross-flags}
+
+When set to `true`, don't add cross compilation flags during configure phase.
+
+#### `dontUseWafConfigure` {#dont-use-waf-configure}
+
+When set to true, don't use the predefined `wafConfigurePhase`.
+
+#### `dontUseWafBuild` {#dont-use-waf-build}
+
+When set to true, don't use the predefined `wafBuildPhase`.
+
+#### `dontUseWafInstall` {#dont-use-waf-install}
+
+When set to true, don't use the predefined `wafInstallPhase`.
+
+### Similar variables {#waf-hook-similar-variables}
+
+The following variables are similar to their `stdenv.mkDerivation` counterparts.
+
+| `wafHook` Variable    | `stdenv.mkDerivation` Counterpart |
+|-----------------------|-----------------------------------|
+| `wafConfigureFlags`   | `configureFlags`                  |
+| `wafConfigureTargets` | `configureTargets`                |
+| `wafBuildFlags`       | `buildFlags`                      |
+| `wafBuildTargets`     | `buildTargets`                    |
+| `wafInstallFlags`     | `installFlags`                    |
+| `wafInstallTargets`   | `installTargets`                  |
+
+### Honored variables {#waf-hook-honored-variables}
+
+The following variables commonly used by `stdenv.mkDerivation` are honored by `wafHook`.
+
+- `prefixKey`
+- `enableParallelBuilding`
+- `enableParallelInstalling`
diff --git a/nixpkgs/doc/hooks/xcbuild.section.md b/nixpkgs/doc/hooks/xcbuild.section.md
new file mode 100644
index 000000000000..bf404b64c3f9
--- /dev/null
+++ b/nixpkgs/doc/hooks/xcbuild.section.md
@@ -0,0 +1,3 @@
+# xcbuildHook {#xcbuildhook}
+
+Overrides the build and install phases to run the `xcbuild` command. This hook is needed when a project only comes with build files for the Xcode build system. You can disable this behavior by setting `buildPhase` and `configurePhase` to a custom value. `xcbuildFlags` controls flags passed only to `xcbuild`.
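+
+A minimal sketch; the target name is hypothetical and depends on the Xcode project:
+
+```nix
+{
+  nativeBuildInputs = [ xcbuildHook ];
+
+  # hypothetical: build only a specific target, passed only to xcbuild
+  xcbuildFlags = [ "-target" "MyApp" ];
+}
+```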
diff --git a/nixpkgs/doc/hooks/zig.section.md b/nixpkgs/doc/hooks/zig.section.md
new file mode 100644
index 000000000000..1a09491649d7
--- /dev/null
+++ b/nixpkgs/doc/hooks/zig.section.md
@@ -0,0 +1,63 @@
+# zig.hook {#zig-hook}
+
+[Zig](https://ziglang.org/) is a general-purpose programming language and toolchain for maintaining robust, optimal and reusable software.
+
+In Nixpkgs, `zig.hook` overrides the default build, check and install phases.
+
+## Example code snippet {#zig-hook-example-code-snippet}
+
+```nix
+{ lib
+, stdenv
+, zig_0_11
+}:
+
+stdenv.mkDerivation {
+  # . . .
+
+  nativeBuildInputs = [
+    zig_0_11.hook
+  ];
+
+  zigBuildFlags = [ "-Dman-pages=true" ];
+
+  dontUseZigCheck = true;
+
+  # . . .
+}
+```
+
+## Variables controlling zig.hook {#zig-hook-variables-controlling}
+
+### `zig.hook` Exclusive Variables {#zig-hook-exclusive-variables}
+
+The variables below are exclusive to `zig.hook`.
+
+#### `dontUseZigBuild` {#dont-use-zig-build}
+
+Disables using `zigBuildPhase`.
+
+#### `dontUseZigCheck` {#dont-use-zig-check}
+
+Disables using `zigCheckPhase`.
+
+#### `dontUseZigInstall` {#dont-use-zig-install}
+
+Disables using `zigInstallPhase`.
+
+### Similar variables {#zig-hook-similar-variables}
+
+The following variables are similar to their `stdenv.mkDerivation` counterparts.
+
+| `zig.hook` Variable | `stdenv.mkDerivation` Counterpart |
+|---------------------|-----------------------------------|
+| `zigBuildFlags`     | `buildFlags`                      |
+| `zigCheckFlags`     | `checkFlags`                      |
+| `zigInstallFlags`   | `installFlags`                    |
+
+### Variables honored by zig.hook {#zig-hook-variables-honored}
+
+The following variables commonly used by `stdenv.mkDerivation` are honored by `zig.hook`.
+
+- `prefixKey`
+- `dontAddPrefix`
diff --git a/nixpkgs/doc/languages-frameworks/agda.section.md b/nixpkgs/doc/languages-frameworks/agda.section.md
new file mode 100644
index 000000000000..33fffc60c8db
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/agda.section.md
@@ -0,0 +1,257 @@
+# Agda {#agda}
+
+## How to use Agda {#how-to-use-agda}
+
+Agda is available as the [agda](https://search.nixos.org/packages?channel=unstable&show=agda&from=0&size=30&sort=relevance&query=agda)
+package.
+
+The `agda` package installs an Agda wrapper, which calls `agda` with `--library-file`
+set to a generated library file within the Nix store; this means your library file in
+`$HOME/.agda/libraries` will be ignored. By default the agda package installs Agda
+with no libraries, i.e. the generated library file is empty. To use Agda with libraries,
+the `agda.withPackages` function can be used. This function takes either:
+
+* A list of packages,
+* or a function which returns a list of packages when given the `agdaPackages` attribute set,
+* or an attribute set containing a list of packages and a GHC derivation for compilation (see below),
+* or an attribute set containing a function which returns a list of packages when given the `agdaPackages` attribute set and a GHC derivation for compilation (see below).
+
+For example, suppose we wanted a version of Agda which has access to the standard library. This can be obtained with the expressions:
+
+```nix
+agda.withPackages [ agdaPackages.standard-library ]
+```
+
+or
+
+```nix
+agda.withPackages (p: [ p.standard-library ])
+```
+
+or can be called as in the [Compiling Agda](#compiling-agda) section.
+
+If you want to use a different version of a library (for instance a development version),
+override the `src` attribute of the package to point to your local repository:
+
+```nix
+agda.withPackages (p: [
+  (p.standard-library.overrideAttrs (oldAttrs: {
+    version = "local version";
+    src = /path/to/local/repo/agda-stdlib;
+  }))
+])
+```
+
+You can also reference a GitHub repository:
+
+```nix
+agda.withPackages (p: [
+  (p.standard-library.overrideAttrs (oldAttrs: {
+    version = "1.5";
+    src =  fetchFromGitHub {
+      repo = "agda-stdlib";
+      owner = "agda";
+      rev = "v1.5";
+      hash = "sha256-nEyxYGSWIDNJqBfGpRDLiOAnlHJKEKAOMnIaqfVZzJk=";
+    };
+  }))
+])
+```
+
+If you want to use a library not added to Nixpkgs, you can add a
+dependency to a local library by calling `agdaPackages.mkDerivation`.
+
+```nix
+agda.withPackages (p: [
+  (p.mkDerivation {
+    pname = "your-agda-lib";
+    version = "1.0.0";
+    src = /path/to/your-agda-lib;
+  })
+])
+```
+
+Again, you can reference GitHub:
+
+```nix
+agda.withPackages (p: [
+  (p.mkDerivation {
+    pname = "your-agda-lib";
+    version = "1.0.0";
+    src = fetchFromGitHub {
+      repo = "repo";
+      owner = "owner";
+      rev = "...";
+      hash = "...";
+    };
+  })
+])
+```
+
+See [Building Agda Packages](#building-agda-packages) for more information on `mkDerivation`.
+
+Agda will not by default use these libraries. To tell Agda to use a library we have some options:
+
+* Call `agda` with the library flag:
+  ```ShellSession
+  $ agda -l standard-library -i . MyFile.agda
+  ```
+* Write a `my-library.agda-lib` file for the project you are working on which may look like:
+  ```
+  name: my-library
+  include: .
+  depend: standard-library
+  ```
+* Create the file `~/.agda/defaults` and add any libraries you want to use by default.
+
+More information can be found in the [official Agda documentation on library management](https://agda.readthedocs.io/en/v2.6.1/tools/package-system.html).
+
+## Compiling Agda {#compiling-agda}
+
+Agda modules can be compiled using the GHC backend with the `--compile` flag. A version of `ghc` with `ieee754` is made available to the Agda program via the `--with-compiler` flag.
+This can be overridden by a different version of `ghc` as follows:
+
+```nix
+agda.withPackages {
+  pkgs = [ /* ... */ ];
+  ghc = haskell.compiler.ghcHEAD;
+}
+```
+
+## Writing Agda packages {#writing-agda-packages}
+
+To write a nix derivation for an Agda library, first check that the library has a `*.agda-lib` file.
+
+A derivation can then be written using `agdaPackages.mkDerivation`. This has similar arguments to `stdenv.mkDerivation` with the following additions:
+
+* `everythingFile` can be used to specify the location of the `Everything.agda` file, defaulting to `./Everything.agda`. If this file does not exist then either it should be patched in or the `buildPhase` should be overridden (see below).
+* `libraryName` should be the name that appears in the `*.agda-lib` file, defaulting to `pname`.
+* `libraryFile` should be the file name of the `*.agda-lib` file, defaulting to `${libraryName}.agda-lib`.
+
+Here is an example `default.nix`
+
+```nix
+{ nixpkgs ? <nixpkgs> }:
+with (import nixpkgs {});
+agdaPackages.mkDerivation {
+  version = "1.0";
+  pname = "my-agda-lib";
+  src = ./.;
+  buildInputs = [
+    agdaPackages.standard-library
+  ];
+}
+```
+
+### Building Agda packages {#building-agda-packages}
+
+The default build phase for `agdaPackages.mkDerivation` runs `agda` on the `Everything.agda` file.
+If something else is needed to build the package (e.g. `make`) then the `buildPhase` should be overridden.
+Additionally, a `preBuild` or `configurePhase` can be used if there are steps that need to be done prior to checking the `Everything.agda` file.
+`agda` and the Agda libraries contained in `buildInputs` are made available during the build phase.
+
+### Installing Agda packages {#installing-agda-packages}
+
+The default install phase copies Agda source files, Agda interface files (`*.agdai`) and `*.agda-lib` files to the output directory.
+This can be overridden.
+
+By default, Agda sources are files ending in `.agda`, or literate Agda files ending in `.lagda`, `.lagda.tex`, `.lagda.org`, `.lagda.md`, `.lagda.rst`. The list of recognised Agda source extensions can be extended by setting the `extraExtensions` config variable.
+
+## Maintaining the Agda package set on Nixpkgs {#maintaining-the-agda-package-set-on-nixpkgs}
+
+We are aiming at providing all common Agda libraries as packages on `nixpkgs`,
+and keeping them up to date.
+Contributions and maintenance help is always appreciated,
+but the maintenance effort is typically low since the Agda ecosystem is quite small.
+
+The `nixpkgs` Agda package set tries to take up a role similar to that of [Stackage](https://www.stackage.org/) in the Haskell world.
+It is a curated set of libraries that:
+
+1. Always work together.
+2. Are as up-to-date as possible.
+
+While the Haskell ecosystem is huge, and Stackage is highly automated,
+the Agda package set is small and can (still) be maintained by hand.
+
+### Adding Agda packages to Nixpkgs {#adding-agda-packages-to-nixpkgs}
+
+To add an Agda package to `nixpkgs`, the derivation should be written to `pkgs/development/libraries/agda/${library-name}/` and an entry should be added to `pkgs/top-level/agda-packages.nix`. Here it is called in a scope with access to all other Agda libraries, so the top line of the `default.nix` can look like:
+
+```nix
+{ mkDerivation, standard-library, fetchFromGitHub }:
+{}
+```
+
+Note that the derivation function is called with `mkDerivation` set to `agdaPackages.mkDerivation`, therefore you
+could use a similar set as in your `default.nix` from [Writing Agda Packages](#writing-agda-packages) with
+`agdaPackages.mkDerivation` replaced with `mkDerivation`.
+
+Here is an example skeleton derivation for iowa-stdlib:
+
+```nix
+mkDerivation {
+  version = "1.5.0";
+  pname = "iowa-stdlib";
+
+  src = <...>;
+
+  libraryFile = "";
+  libraryName = "IAL-1.3";
+
+  buildPhase = ''
+    patchShebangs find-deps.sh
+    make
+  '';
+}
+```
+
+This library has a file called `.agda-lib`, and so we give an empty string to `libraryFile` as nothing precedes `.agda-lib` in the filename. This file contains `name: IAL-1.3`, and so we let `libraryName = "IAL-1.3"`. This library does not use an `Everything.agda` file and instead has a Makefile, so there is no need to set `everythingFile` and we set a custom `buildPhase`.
+
+When writing an Agda package it is essential to make sure that no `.agda-lib` file gets added to the store as a single file (for example by using `writeText`). This causes Agda to think that the nix store is an Agda library and it will attempt to write to it whenever it typechecks something. See [https://github.com/agda/agda/issues/4613](https://github.com/agda/agda/issues/4613).
+
+In the pull request adding this library,
+you can test whether it builds correctly by writing in a comment:
+
+```
+@ofborg build agdaPackages.iowa-stdlib
+```
+
+### Maintaining Agda packages {#agda-maintaining-packages}
+
+As mentioned before, the aim is to have a compatible, and up-to-date package set.
+These two conditions sometimes exclude each other:
+For example, if we update `agdaPackages.standard-library` because there was an upstream release,
+this will typically break many reverse dependencies,
+i.e. downstream Agda libraries that depend on the standard library.
+In `nixpkgs` we are typically among the first to notice this,
+since we have build tests in place to check this.
+
+In a pull request updating e.g. the standard library, you should write the following comment:
+
+```
+@ofborg build agdaPackages.standard-library.passthru.tests
+```
+
+This will build all reverse dependencies of the standard library,
+for example `agdaPackages.agda-categories`, or `agdaPackages.generic`.
+
+In some cases it is useful to build _all_ Agda packages.
+This can be done with the following GitHub comment:
+
+```
+@ofborg build agda.passthru.tests.allPackages
+```
+
+Sometimes, the builds of the reverse dependencies fail because they have not yet been updated and released.
+You should drop the maintainers a quick issue notifying them of the breakage,
+citing the build error (which you can get from the ofborg logs).
+If you are motivated, you might even send a pull request that fixes it.
+Usually, the maintainers will answer within a week or two with a new release.
+Bumping the version of that reverse dependency should be a further commit on your PR.
+
+In the rare case that a new release is not to be expected within an acceptable time,
+mark the broken package as broken by setting `meta.broken = true;`.
+This will exclude it from the build test.
+It can be added later when it is fixed,
+and does not hinder the advancement of the whole package set in the meantime.
diff --git a/nixpkgs/doc/languages-frameworks/android.section.md b/nixpkgs/doc/languages-frameworks/android.section.md
new file mode 100644
index 000000000000..1c5687f8ebf1
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/android.section.md
@@ -0,0 +1,351 @@
+# Android {#android}
+
+The Android build environment provides three major features and a number of
+supporting features.
+
+## Deploying an Android SDK installation with plugins {#deploying-an-android-sdk-installation-with-plugins}
+
+The first use case is deploying the SDK with a desired set of plugins or subsets
+of an SDK.
+
+```nix
+with import <nixpkgs> {};
+
+let
+  androidComposition = androidenv.composeAndroidPackages {
+    cmdLineToolsVersion = "8.0";
+    toolsVersion = "26.1.1";
+    platformToolsVersion = "30.0.5";
+    buildToolsVersions = [ "30.0.3" ];
+    includeEmulator = false;
+    emulatorVersion = "30.3.4";
+    platformVersions = [ "28" "29" "30" ];
+    includeSources = false;
+    includeSystemImages = false;
+    systemImageTypes = [ "google_apis_playstore" ];
+    abiVersions = [ "armeabi-v7a" "arm64-v8a" ];
+    cmakeVersions = [ "3.10.2" ];
+    includeNDK = true;
+    ndkVersions = ["22.0.7026061"];
+    useGoogleAPIs = false;
+    useGoogleTVAddOns = false;
+    includeExtras = [
+      "extras;google;gcm"
+    ];
+  };
+in
+androidComposition.androidsdk
+```
+
+The above function invocation states that we want an Android SDK with the above
+specified plugin versions. By default, most plugins are disabled. Notable
+exceptions are the tools, platform-tools and build-tools sub packages.
+
+The following parameters are supported:
+
+* `cmdLineToolsVersion` specifies the version of the `cmdline-tools` package to use.
+* `toolsVersion` specifies the version of the `tools` package. Note that `tools` is
+  obsolete and currently only `26.1.1` is available, so there is not much choice
+  here; however, you can set it to `null` if you don't want it.
+* `platformToolsVersion` specifies the version of the `platform-tools` plugin.
+* `buildToolsVersions` specifies the versions of the `build-tools` plugins to
+  use.
+* `includeEmulator` specifies whether to deploy the emulator package (`false`
+  by default). When enabled, the version of the emulator to deploy can be
+  specified by setting the `emulatorVersion` parameter.
+* `cmakeVersions` specifies which CMake versions should be deployed.
+* `includeNDK` specifies that the Android NDK bundle should be included.
+  Defaults to: `false`.
+* `ndkVersions` specifies the NDK versions that we want to use. These are linked
+  under the `ndk` directory of the SDK root, and the first is linked under the
+  `ndk-bundle` directory.
+* `ndkVersion` is equivalent to specifying one entry in `ndkVersions`, and
+  `ndkVersions` overrides this parameter if provided.
+* `includeExtras` is an array of identifier strings referring to arbitrary
+  add-on packages that should be installed.
+* `platformVersions` specifies which platform SDK versions should be included.
+
+For each platform version that has been specified, we can apply the following
+options:
+
+* `includeSystemImages` specifies whether a system image for each platform SDK
+  should be included.
+* `includeSources` specifies whether the sources for each SDK version should be
+  included.
+* `useGoogleAPIs` specifies that for each selected platform version the
+  Google API should be included.
+* `useGoogleTVAddOns` specifies that for each selected platform version the
+  Google TV add-on should be included.
+
+For each requested system image we can specify the following options:
+
+* `systemImageTypes` specifies what kind of system images should be included.
+  Defaults to: `default`.
+* `abiVersions` specifies what kind of ABI version of each system image should
+  be included. Defaults to: `armeabi-v7a`.
+
+Most of the function arguments have reasonable default settings.
+
+You can specify license names:
+
+* `extraLicenses` is a list of license names.
+  You can get these names from repo.json or `querypackages.sh licenses`. The SDK
+  license (`android-sdk-license`) is accepted for you if you set accept_license
+  to true. If you are doing something like working with preview SDKs, you will
+  want to add `android-sdk-preview-license` or whichever license applies here.
+
+Additionally, you can override the repositories that composeAndroidPackages will
+pull from:
+
+* `repoJson` specifies a path to a generated repo.json file. You can generate this
+  by running `generate.sh`, which in turn will call into `mkrepo.rb`.
+* `repoXmls` is an attribute set containing paths to repo XML files. If specified,
+  it takes priority over `repoJson`, and will trigger a local build writing out a
+  repo.json to the Nix store based on the given repository XMLs.
+
+```nix
+{
+  repoXmls = {
+    packages = [ ./xml/repository2-1.xml ];
+    images = [
+      ./xml/android-sys-img2-1.xml
+      ./xml/android-tv-sys-img2-1.xml
+      ./xml/android-wear-sys-img2-1.xml
+      ./xml/android-wear-cn-sys-img2-1.xml
+      ./xml/google_apis-sys-img2-1.xml
+      ./xml/google_apis_playstore-sys-img2-1.xml
+    ];
+    addons = [ ./xml/addon2-1.xml ];
+  };
+}
+```
+
+When building the above expression with:
+
+```bash
+$ nix-build
+```
+
+The Android SDK gets deployed with all desired plugin versions.
+
+We can also deploy subsets of the Android SDK. For example, to deploy only the
+`platform-tools` package, you can evaluate the following expression:
+
+```nix
+with import <nixpkgs> {};
+
+let
+  androidComposition = androidenv.composeAndroidPackages {
+    # ...
+  };
+in
+androidComposition.platform-tools
+```
+
+## Using predefined Android package compositions {#using-predefined-android-package-compositions}
+
+In addition to composing an Android package set manually, it is also possible
+to use a predefined composition that contains all basic packages for a specific
+Android version, such as version 9.0 (API-level 28).
+
+The following Nix expression can be used to deploy the entire SDK with all basic
+plugins:
+
+```nix
+with import <nixpkgs> {};
+
+androidenv.androidPkgs_9_0.androidsdk
+```
+
+It is also possible to use one plugin only:
+
+```nix
+with import <nixpkgs> {};
+
+androidenv.androidPkgs_9_0.platform-tools
+```
+
+## Building an Android application {#building-an-android-application}
+
+In addition to the SDK, it is also possible to build an Ant-based Android
+project and automatically deploy all the Android plugins that a project
+requires.
+
+
+```nix
+with import <nixpkgs> {};
+
+androidenv.buildApp {
+  name = "MyAndroidApp";
+  src = ./myappsources;
+  release = true;
+
+  # If release is set to true, you need to specify the following parameters
+  keyStore = ./keystore;
+  keyAlias = "myfirstapp";
+  keyStorePassword = "mykeystore";
+  keyAliasPassword = "myfirstapp";
+
+  # Any Android SDK parameters that install all the relevant plugins that a
+  # build requires
+  platformVersions = [ "24" ];
+
+  # When we include the NDK, then ndk-build is invoked before Ant gets invoked
+  includeNDK = true;
+}
+```
+
+Aside from the app-specific build parameters (`name`, `src`, `release` and
+keystore parameters), the `buildApp {}` function supports all the function
+parameters that the SDK composition function (the function shown in the
+previous section) supports.
+
+This build function is particularly useful when it is desired to use
+[Hydra](https://nixos.org/hydra): the Nix-based continuous integration solution
+to build Android apps. An Android APK gets exposed as a build product and can be
+installed on any Android device with a web browser by navigating to the build
+result page.
+
+## Spawning emulator instances {#spawning-emulator-instances}
+
+For testing purposes, it can also be quite convenient to automatically generate
+scripts that spawn emulator instances with all desired configuration settings.
+
+An emulator spawn script can be configured by invoking the `emulateApp {}`
+function:
+
+```nix
+with import <nixpkgs> {};
+
+androidenv.emulateApp {
+  name = "emulate-MyAndroidApp";
+  platformVersion = "28";
+  abiVersion = "x86"; # armeabi-v7a, mips, x86_64
+  systemImageType = "google_apis_playstore";
+}
+```
+
+Additional flags may be applied to the Android SDK's emulator through the runtime environment variable `$NIX_ANDROID_EMULATOR_FLAGS`.
+
+It is also possible to specify an APK to deploy inside the emulator
+and the package and activity names to launch it:
+
+```nix
+with import <nixpkgs> {};
+
+androidenv.emulateApp {
+  name = "emulate-MyAndroidApp";
+  platformVersion = "24";
+  abiVersion = "armeabi-v7a"; # mips, x86, x86_64
+  systemImageType = "default";
+  app = ./MyApp.apk;
+  package = "MyApp";
+  activity = "MainActivity";
+}
+```
+
+In addition to prebuilt APKs, you can also bind the APK parameter to a
+`buildApp {}` function invocation shown in the previous example.
+
+## Notes on environment variables in Android projects {#notes-on-environment-variables-in-android-projects}
+
+* `ANDROID_SDK_ROOT` should point to the Android SDK. In your Nix expressions, this should be
+  `${androidComposition.androidsdk}/libexec/android-sdk`. Note that `ANDROID_HOME` is deprecated,
+  but if you rely on tools that need it, you can export it too.
+* `ANDROID_NDK_ROOT` should point to the Android NDK, if you're doing NDK development.
+  In your Nix expressions, this should be `${ANDROID_SDK_ROOT}/ndk-bundle`.
+
+If you are running the Android Gradle plugin, you need to export `GRADLE_OPTS` to override aapt2
+to point to the aapt2 binary in the Nix store as well, or use an FHS environment so the packaged
+aapt2 can run. If you don't want to use an FHS environment, something like this should work:
+
+```nix
+let
+  buildToolsVersion = "30.0.3";
+
+  # Use buildToolsVersion when you define androidComposition
+  androidComposition = <...>;
+in
+pkgs.mkShell rec {
+  ANDROID_SDK_ROOT = "${androidComposition.androidsdk}/libexec/android-sdk";
+  ANDROID_NDK_ROOT = "${ANDROID_SDK_ROOT}/ndk-bundle";
+
+  # Use the same buildToolsVersion here
+  GRADLE_OPTS = "-Dorg.gradle.project.android.aapt2FromMavenOverride=${ANDROID_SDK_ROOT}/build-tools/${buildToolsVersion}/aapt2";
+}
+```
+
+If you are using cmake, you need to add it to `PATH` in a shell hook or FHS env profile.
+The path is suffixed with a build number, but properly prefixed with the version.
+So, something like this should suffice:
+
+```nix
+let
+  cmakeVersion = "3.10.2";
+
+  # Use cmakeVersion when you define androidComposition
+  androidComposition = <...>;
+in
+pkgs.mkShell rec {
+  ANDROID_SDK_ROOT = "${androidComposition.androidsdk}/libexec/android-sdk";
+  ANDROID_NDK_ROOT = "${ANDROID_SDK_ROOT}/ndk-bundle";
+
+  # Use the same cmakeVersion here
+  shellHook = ''
+    export PATH="$(echo "$ANDROID_SDK_ROOT/cmake/${cmakeVersion}".*/bin):$PATH"
+  '';
+}
+```
+
+Note that running Android Studio with `ANDROID_SDK_ROOT` set will automatically write a
+`local.properties` file with `sdk.dir` set to `$ANDROID_SDK_ROOT` if one does not already
+exist. If you are using the NDK as well, you may have to add `ndk.dir` to this file.
+
+An example `shell.nix` that does all this for you is provided in `examples/shell.nix`.
+This `shell.nix` includes a shell hook that overwrites `local.properties` with the correct
+`sdk.dir` and `ndk.dir` values. This will ensure that the SDK and NDK directories will
+both be correct when you run Android Studio inside nix-shell.
+
+## Notes on improving build.gradle compatibility {#notes-on-improving-build.gradle-compatibility}
+
+Ensure that your buildToolsVersion and ndkVersion match what is declared in androidenv.
+If you are using cmake, make sure its declared version is correct too.
+
+Otherwise, you may get cryptic errors from aapt2 and the Android Gradle plugin warning
+that it cannot install the build tools because the SDK directory is not writeable.
+
+```gradle
+android {
+    buildToolsVersion "30.0.3"
+    ndkVersion = "22.0.7026061"
+    externalNativeBuild {
+        cmake {
+            version "3.10.2"
+        }
+    }
+}
+
+```
+
+## Querying the available versions of each plugin {#querying-the-available-versions-of-each-plugin}
+
+repo.json provides all the options in one file now.
+
+A shell script in the `pkgs/development/mobile/androidenv/` subdirectory can be used to retrieve all
+possible options:
+
+```bash
+./querypackages.sh packages
+```
+
+The above command-line instruction queries all package versions in repo.json.
+
+## Updating the generated expressions {#updating-the-generated-expressions}
+
+repo.json is generated from XML files that the Android Studio package manager uses.
+To update the expressions run the `generate.sh` script that is stored in the
+`pkgs/development/mobile/androidenv/` subdirectory:
+
+```bash
+./generate.sh
+```
diff --git a/nixpkgs/doc/languages-frameworks/beam.section.md b/nixpkgs/doc/languages-frameworks/beam.section.md
new file mode 100644
index 000000000000..3653cdb337d1
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/beam.section.md
@@ -0,0 +1,396 @@
+# BEAM Languages (Erlang, Elixir & LFE) {#sec-beam}
+
+## Introduction {#beam-introduction}
+
+In this document and related Nix expressions, we use the term, _BEAM_, to describe the environment. BEAM is the name of the Erlang Virtual Machine and, as far as we're concerned, from a packaging perspective, all languages that run on the BEAM are interchangeable. That which varies, like the build system, is transparent to users of any given BEAM package, so we make no distinction.
+
+## Available versions and deprecations schedule {#available-versions-and-deprecations-schedule}
+
+### Elixir {#elixir}
+
+nixpkgs follows the [official elixir deprecation schedule](https://hexdocs.pm/elixir/compatibility-and-deprecations.html) and keeps the last 5 released versions of Elixir available.
+
+## Structure {#beam-structure}
+
+All BEAM-related expressions are available via the top-level `beam` attribute, which includes:
+
+- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlang_22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
+
+- `packages`: a set of package builders (Mix and rebar3), each compiled with a specific Erlang/OTP version, e.g. `beam.packages.erlang22`.
+
+The default Erlang compiler, defined by `beam.interpreters.erlang`, is aliased as `erlang`. The default BEAM package set is defined by `beam.packages.erlang` and aliased at the top level as `beamPackages`.
+
+To create a package builder built with a custom Erlang version, use the lambda, `beam.packagesWith`, which accepts an Erlang/OTP derivation and produces a package builder similar to `beam.packages.erlang`.
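+
+For example, to get a package set (and its `rebar3`) built against a pinned Erlang/OTP version, something like the following sketch can be used, assuming the corresponding interpreter attribute exists:
+
+```nix
+let
+  packages = beam.packagesWith beam.interpreters.erlang_26;
+in
+packages.rebar3
+```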
+
+Many Erlang/OTP distributions available in `beam.interpreters` have versions with ODBC and/or Java enabled, or without wx (no observer support). For example, there's `beam.interpreters.erlang_22_odbc_javac`, which corresponds to `beam.interpreters.erlang_22`, and `beam.interpreters.erlang_22_nox`, which also corresponds to `beam.interpreters.erlang_22`.
+
+## Build Tools {#build-tools}
+
+### Rebar3 {#build-tools-rebar3}
+
+We provide a version of Rebar3, under `rebar3`. We also provide a helper to fetch Rebar3 dependencies from a lockfile under `fetchRebar3Deps`.
+
+We also provide a version of Rebar3 with plugins included, under `rebar3WithPlugins`. This package is a function which takes two arguments: `plugins`, a list of nix derivations to include as plugins (loaded only when specified in `rebar.config`), and `globalPlugins`, plugins that should always be loaded by rebar3. Example: `rebar3WithPlugins { globalPlugins = [beamPackages.pc]; }`.
+
+When adding a new plugin it is important that the `packageName` attribute is the same as the atom used by rebar3 to refer to the plugin.
+
+### Mix & Erlang.mk {#build-tools-other}
+
+Erlang.mk works exactly as expected. There is a bootstrap process that needs to be run, which is supported by the `buildErlangMk` derivation.
+
+For Elixir applications use `mixRelease` to make a release. See examples for more details.
+
+There is also a `buildMix` helper, whose behavior is closer to that of `buildErlangMk` and `buildRebar3`. The primary difference is that `mixRelease` makes a release, while `buildMix` only builds the package, making it useful for libraries and other dependencies.
+
+## How to Install BEAM Packages {#how-to-install-beam-packages}
+
+BEAM builders are not registered at the top level, because they are not relevant to the vast majority of Nix users.
+To use any of those builders in your environment, refer to them by their attribute path under `beamPackages`, e.g. `beamPackages.rebar3`:
+
+::: {.example #ex-beam-ephemeral-shell}
+# Ephemeral shell
+
+```ShellSession
+$ nix-shell -p beamPackages.rebar3
+```
+:::
+
+::: {.example #ex-beam-declarative-shell}
+# Declarative shell
+
+```nix
+let
+  pkgs = import <nixpkgs> { config = {}; overlays = []; };
+in
+pkgs.mkShell {
+  packages = [ pkgs.beamPackages.rebar3 ];
+}
+```
+:::
+
+## Packaging BEAM Applications {#packaging-beam-applications}
+
+### Erlang Applications {#packaging-erlang-applications}
+
+#### Rebar3 Packages {#rebar3-packages}
+
+The Nix function, `buildRebar3`, defined in `beam.packages.erlang.buildRebar3` and aliased at the top level, can be used to build a derivation that understands how to build a Rebar3 project.
+
+If a package needs to compile native code via Rebar3's port compilation mechanism, add `compilePort = true;` to the derivation.
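+
+A hedged sketch of such a derivation; all names and values are illustrative, and existing packages under `pkgs/development/beam-modules` are the authoritative reference for the exact attributes:
+
+```nix
+beamPackages.buildRebar3 rec {
+  name = "my_lib";        # illustrative project name
+  version = "0.1.0";
+
+  src = fetchFromGitHub {
+    owner = "owner";      # illustrative
+    repo = "my_lib";
+    rev = version;
+    hash = lib.fakeHash;  # replace with the real hash after the first build attempt
+  };
+
+  # Erlang dependencies, if any
+  beamDeps = [ ];
+}
+```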
+
+#### Erlang.mk Packages {#erlang-mk-packages}
+
+Erlang.mk functions similarly to Rebar3, except we use `buildErlangMk` instead of `buildRebar3`.
+
+#### Mix Packages {#mix-packages}
+
+`mixRelease` is used to make a release in the mix sense. Dependencies will need to be fetched with `fetchMixDeps` and passed to it.
+
+#### mixRelease - Elixir Phoenix example {#mix-release-elixir-phoenix-example}
+
+There are three steps: frontend dependencies (JavaScript), backend dependencies (Elixir), and the final derivation that puts both of those together.
+
+##### mixRelease - Frontend dependencies (javascript) {#mix-release-javascript-deps}
+
+For Phoenix projects, inside of nixpkgs you can either use yarn2nix (mkYarnModule) or node2nix. An example with yarn2nix can be found [here](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/web-apps/plausible/default.nix#L39). An example with node2nix will follow. To package something outside of nixpkgs, you have alternatives like [npmlock2nix](https://github.com/nix-community/npmlock2nix) or [nix-npm-buildpackage](https://github.com/serokell/nix-npm-buildpackage).
+
+##### mixRelease - backend dependencies (mix) {#mix-release-mix-deps}
+
+There are 2 ways to package backend dependencies. With mix2nix and with a fixed-output-derivation (FOD).
+
+###### mix2nix {#mix2nix}
+
+`mix2nix` is a CLI tool available in nixpkgs. It will generate a Nix expression from a `mix.lock` file. It is quite standard in the 2nix tool series.
+
+Note that currently mix2nix can't handle git dependencies inside the mix.lock file. If you have git dependencies, you can either add them manually (see [example](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/pleroma/default.nix#L20)) or use the FOD method.
+
+The advantage of using mix2nix is that Nix will know your whole dependency graph. On a dependency update, this won't trigger a full rebuild and download of all the dependencies, whereas an FOD will do so.
+
+Practical steps:
+
+- run `mix2nix > mix_deps.nix` in the upstream repo.
+- pass `mixNixDeps = with pkgs; import ./mix_deps.nix { inherit lib beamPackages; };` as an argument to mixRelease.
+
+If there are git dependencies:
+
+- You'll need to fix the version artificially in `mix.exs` and regenerate the `mix.lock` with the fixed version (upstream). This will enable you to run `mix2nix > mix_deps.nix`.
+- From the mix_deps.nix file, remove the dependencies that had git versions and pass them as an override to the import function.
+
+```nix
+{
+  mixNixDeps = import ./mix.nix {
+    inherit beamPackages lib;
+    overrides = (final: prev: {
+      # mix2nix does not support git dependencies yet,
+      # so we need to add them manually
+      prometheus_ex = beamPackages.buildMix rec {
+        name = "prometheus_ex";
+        version = "3.0.5";
+
+        # Change the argument src with the git src that you actually need
+        src = fetchFromGitLab {
+          domain = "git.pleroma.social";
+          group = "pleroma";
+          owner = "elixir-libraries";
+          repo = "prometheus.ex";
+          rev = "a4e9beb3c1c479d14b352fd9d6dd7b1f6d7deee5";
+          hash = "sha256-U17LlN6aGUKUFnT4XyYXppRN+TvUBIBRHEUsfeIiGOw=";
+        };
+        # you can re-use the same beamDeps argument as generated
+        beamDeps = with final; [ prometheus ];
+      };
+    });
+  };
+}
+```
+
+You will need to run the build process once to fix the hash to correspond to your new git src.
+
+###### FOD {#fixed-output-derivation}
+
+A fixed output derivation will download mix dependencies from the internet. To ensure reproducibility, a hash will be supplied. Note that mix is relatively reproducible. An FOD generating a different hash on each run hasn't been observed (as opposed to npm where the chances are relatively high). See [elixir-ls](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/beam-modules/elixir-ls/default.nix) for a usage example of FOD.
+
+Practical steps:
+
+- start with the following argument to `mixRelease`:
+
+```nix
+{
+  mixFodDeps = fetchMixDeps {
+    pname = "mix-deps-${pname}";
+    inherit src version;
+    hash = lib.fakeHash;
+  };
+}
+```
+
+The first build will complain about the hash value; you can replace it with the suggested value after that.
+
+Note that if after you've replaced the value, nix suggests another hash, then mix is not fetching the dependencies reproducibly. An FOD will not work in that case and you will have to use mix2nix.
+
+##### mixRelease - example {#mix-release-example}
+
+Here is how your `default.nix` file would look for a Phoenix project.
+
+```nix
+with import <nixpkgs> { };
+
+let
+  # beam.interpreters.erlang_26 is available if you need a particular version
+  packages = beam.packagesWith beam.interpreters.erlang;
+
+  pname = "your_project";
+  version = "0.0.1";
+
+  src = builtins.fetchgit {
+    url = "ssh://git@github.com/your_id/your_repo";
+    rev = "replace_with_your_commit";
+  };
+
+  # if using mix2nix you can use the mixNixDeps attribute
+  mixFodDeps = packages.fetchMixDeps {
+    pname = "mix-deps-${pname}";
+    inherit src version;
+    # nix will complain and tell you the right value to replace this with
+    hash = lib.fakeHash;
+    mixEnv = ""; # default is "prod", when empty includes all dependencies, such as "dev", "test".
+    # if you have build time environment variables add them here
+    MY_ENV_VAR = "my_value";
+  };
+
+  nodeDependencies = (pkgs.callPackage ./assets/default.nix { }).shell.nodeDependencies;
+
+in packages.mixRelease {
+  inherit src pname version mixFodDeps;
+  # if you have build time environment variables add them here
+  MY_ENV_VAR = "my_value";
+
+  postBuild = ''
+    ln -sf ${nodeDependencies}/lib/node_modules assets/node_modules
+    npm run deploy --prefix ./assets
+
+    # for external tasks you need a workaround for the no-deps-check flag
+    # https://github.com/phoenixframework/phoenix/issues/2690
+    mix do deps.loadpaths --no-deps-check, phx.digest
+    mix phx.digest --no-deps-check
+  '';
+}
+```
+
+Setup will require the following steps:
+
+- Move your secrets to runtime environment variables. For more information refer to the [runtime.exs docs](https://hexdocs.pm/mix/Mix.Tasks.Release.html#module-runtime-configuration). On a fresh Phoenix build that would mean that both `DATABASE_URL` and `SECRET_KEY` need to be moved to `runtime.exs`.
+- `cd assets` and `nix-shell -p node2nix --run "node2nix --development"` will generate a Nix expression containing your frontend dependencies
+- commit and push those changes
+- you can now `nix-build .`
+- To run the release, set the `RELEASE_TMP` environment variable to a directory that your program has write access to. It will be used to store the BEAM settings.
+
+#### Example of creating a service for an Elixir - Phoenix project {#example-of-creating-a-service-for-an-elixir---phoenix-project}
+
+In order to create a service with your release, you could add a `service.nix`
+in your project with the following:
+
+```nix
+{ config, pkgs, lib, ... }:
+
+let
+  release = pkgs.callPackage ./default.nix { };
+  release_name = "app";
+  working_directory = "/home/app";
+in
+{
+  systemd.services.${release_name} = {
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" "postgresql.service" ];
+    # note that if you are connecting to a postgres instance on a different host
+    # postgresql.service should not be included in the requires.
+    requires = [ "network-online.target" "postgresql.service" ];
+    description = "my app";
+    environment = {
+      # RELEASE_TMP is used to write the state of the
+      # VM configuration when the system is running
+      # it needs to be a writable directory
+      RELEASE_TMP = working_directory;
+      # can be generated in an elixir console with
+      # Base.encode32(:crypto.strong_rand_bytes(32))
+      RELEASE_COOKIE = "my_cookie";
+      MY_VAR = "my_var";
+    };
+    serviceConfig = {
+      Type = "exec";
+      DynamicUser = true;
+      WorkingDirectory = working_directory;
+      # Implied by DynamicUser, but just to emphasize due to RELEASE_TMP
+      PrivateTmp = true;
+      ExecStart = ''
+        ${release}/bin/${release_name} start
+      '';
+      ExecStop = ''
+        ${release}/bin/${release_name} stop
+      '';
+      ExecReload = ''
+        ${release}/bin/${release_name} restart
+      '';
+      Restart = "on-failure";
+      RestartSec = 5;
+      StartLimitBurst = 3;
+      StartLimitInterval = 10;
+    };
+    # disksup requires bash
+    path = [ pkgs.bash ];
+  };
+
+  # in case you have migration scripts or you want to use a remote shell
+  environment.systemPackages = [ release ];
+}
+```
+
+## How to Develop {#how-to-develop}
+
+### Creating a Shell {#creating-a-shell}
+
+Usually, we need to create a `shell.nix` file and do our development inside of the environment specified therein. Just install your version of Erlang and any other interpreters, and then use your normal build tools. As an example with Elixir:
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+
+with pkgs;
+let
+  elixir = beam.packages.erlang_24.elixir_1_12;
+in
+mkShell {
+  buildInputs = [ elixir ];
+}
+```
+
+### Using an overlay {#beam-using-overlays}
+
+If you need to use an overlay to change some attributes of a derivation, e.g. if you need a bugfix from a version that is not yet available in nixpkgs, you can override attributes such as `version` (and the corresponding `hash`) and then use this overlay in your development environment:
+
+#### `shell.nix` {#beam-using-overlays-shell.nix}
+
+```nix
+let
+  elixir_1_13_1_overlay = (self: super: {
+      elixir_1_13 = super.elixir_1_13.override {
+        version = "1.13.1";
+        sha256 = "sha256-t0ic1LcC7EV3avWGdR7VbyX7pGDpnJSW1ZvwvQUPC3w=";
+      };
+    });
+  pkgs = import <nixpkgs> { overlays = [ elixir_1_13_1_overlay ]; };
+in
+with pkgs;
+mkShell {
+  buildInputs = [
+    elixir_1_13
+  ];
+}
+```
+
+#### Elixir - Phoenix project {#elixir---phoenix-project}
+
+Here is an example `shell.nix`.
+
+```nix
+with import <nixpkgs> { };
+
+let
+  # define packages to install
+  basePackages = [
+    git
+    # replace with beam.packages.erlang.elixir_1_13 if you need
+    beam.packages.erlang.elixir
+    nodejs
+    postgresql_14
+    # only used for frontend dependencies
+    # you are free to use yarn2nix as well
+    nodePackages.node2nix
+    # formatting js file
+    nodePackages.prettier
+  ];
+
+  inputs = basePackages ++ lib.optionals stdenv.isLinux [ inotify-tools ]
+    ++ lib.optionals stdenv.isDarwin
+    (with darwin.apple_sdk.frameworks; [ CoreFoundation CoreServices ]);
+
+  # define shell startup command
+  hooks = ''
+    # this allows mix to work on the local directory
+    mkdir -p .nix-mix .nix-hex
+    export MIX_HOME=$PWD/.nix-mix
+    export HEX_HOME=$PWD/.nix-hex
+    # make hex from Nixpkgs available
+    # `mix local.hex` will install hex into MIX_HOME and should take precedence
+    export MIX_PATH="${beam.packages.erlang.hex}/lib/erlang/lib/hex/ebin"
+    export PATH=$MIX_HOME/bin:$HEX_HOME/bin:$PATH
+    export LANG=C.UTF-8
+    # keep your shell history in iex
+    export ERL_AFLAGS="-kernel shell_history enabled"
+
+    # postgres related
+    # keep all your db data in a folder inside the project
+    export PGDATA="$PWD/db"
+
+    # phoenix related env vars
+    export POOL_SIZE=15
+    export DB_URL="postgresql://postgres:postgres@localhost:5432/db"
+    export PORT=4000
+    export MIX_ENV=dev
+    # add your project env vars here, world readable in the nix store.
+    export ENV_VAR="your_env_var"
+  '';
+
+in mkShell {
+  buildInputs = inputs;
+  shellHook = hooks;
+}
+```
+
+Initializing the project will require the following steps:
+
+- create the db directory `initdb ./db` (inside your mix project folder)
+- create the postgres user `createuser postgres -ds`
+- create the db `createdb db`
+- start the postgres instance `pg_ctl -l "$PGDATA/server.log" start`
+- add the `/db` folder to your `.gitignore`
+- you can start your Phoenix server and get a shell with `iex -S mix phx.server`
diff --git a/nixpkgs/doc/languages-frameworks/bower.section.md b/nixpkgs/doc/languages-frameworks/bower.section.md
new file mode 100644
index 000000000000..20c142dad5b9
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/bower.section.md
@@ -0,0 +1,125 @@
+# Bower {#sec-bower}
+
+[Bower](https://bower.io) is a package manager for web site front-end components. Bower packages (comprising build artifacts and sometimes sources) are stored in `git` repositories, typically on GitHub. The package registry is run by the Bower team with package metadata coming from the `bower.json` file within each package.
+
+The end result of running Bower is a `bower_components` directory which can be included in the web app's build process.
+
+Bower can be run interactively by installing `nodePackages.bower`. More interestingly, the Bower components can be declared in a Nix derivation with the help of `nodePackages.bower2nix`.
+
+## bower2nix usage {#ssec-bower2nix-usage}
+
+Suppose you have a `bower.json` with the following contents:
+
+### Example bower.json {#ex-bowerJson}
+
+```json
+{
+  "name": "my-web-app",
+  "dependencies": {
+    "angular": "~1.5.0",
+    "bootstrap": "~3.3.6"
+  }
+}
+```
+
+Running `bower2nix` will produce something like the following output:
+
+```nix
+{ fetchbower, buildEnv }:
+buildEnv { name = "bower-env"; ignoreCollisions = true; paths = [
+  (fetchbower "angular" "1.5.3" "~1.5.0" "1749xb0firxdra4rzadm4q9x90v6pzkbd7xmcyjk6qfza09ykk9y")
+  (fetchbower "bootstrap" "3.3.6" "~3.3.6" "1vvqlpbfcy0k5pncfjaiskj3y6scwifxygfqnw393sjfxiviwmbv")
+  (fetchbower "jquery" "2.2.2" "1.9.1 - 2" "10sp5h98sqwk90y4k6hbdviwqzvzwqf47r3r51pakch5ii2y7js1")
+]; }
+```
+
+Using the `bower2nix` command line arguments, the output can be redirected to a file. A name like `bower-packages.nix` would be fine.
+
+The resulting derivation is a union of all the downloaded Bower packages (and their dependencies). To use it, the packages still need to be linked together by Bower, which is where `buildBowerComponents` is useful.
+
+## buildBowerComponents function {#ssec-build-bower-components}
+
+The function is implemented in [pkgs/development/bower-modules/generic/default.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/bower-modules/generic/default.nix).
+
+### Example buildBowerComponents {#ex-buildBowerComponents}
+
+```nix
+{
+  bowerComponents = buildBowerComponents {
+    name = "my-web-app";
+    generated = ./bower-packages.nix; # note 1
+    src = myWebApp; # note 2
+  };
+}
+```
+
+In ["buildBowerComponents" example](#ex-buildBowerComponents) the following arguments are of special significance to the function:
+
+1. `generated` specifies the file which was created by {command}`bower2nix`.
+2. `src` is your project's sources. It needs to contain a {file}`bower.json` file.
+
+`buildBowerComponents` will run Bower to link together the output of `bower2nix`, resulting in a `bower_components` directory which can be used.
+
+Here is an example of a web frontend build process using `gulp`. You might use `grunt`, or anything else.
+
+### Example build script (gulpfile.js) {#ex-bowerGulpFile}
+
+```javascript
+var gulp = require('gulp');
+
+gulp.task('default', [], function () {
+  gulp.start('build');
+});
+
+gulp.task('build', [], function () {
+  console.log("Just a dummy gulp build");
+  gulp
+    .src(["./bower_components/**/*"])
+    .pipe(gulp.dest("./gulpdist/"));
+});
+```
+
+### Full example — default.nix {#ex-buildBowerComponentsDefaultNix}
+
+```nix
+{ myWebApp ? { outPath = ./.; name = "myWebApp"; }
+, pkgs ? import <nixpkgs> {}
+}:
+
+pkgs.stdenv.mkDerivation {
+  name = "my-web-app-frontend";
+  src = myWebApp;
+
+  buildInputs = [ pkgs.nodePackages.gulp ];
+
+  bowerComponents = pkgs.buildBowerComponents { # note 1
+    name = "my-web-app";
+    generated = ./bower-packages.nix;
+    src = myWebApp;
+  };
+
+  buildPhase = ''
+    cp --reflink=auto --no-preserve=mode -R $bowerComponents/bower_components . # note 2
+    export HOME=$PWD # note 3
+    ${pkgs.nodePackages.gulp}/bin/gulp build # note 4
+  '';
+
+  installPhase = "mv gulpdist $out";
+}
+```
+
+A few notes about [Full example — `default.nix`](#ex-buildBowerComponentsDefaultNix):
+
+1. The result of `buildBowerComponents` is an input to the frontend build.
+2. Whether to symlink or copy the {file}`bower_components` directory depends on the build tool in use.
+   In this case a copy is used to avoid {command}`gulp` silliness with permissions.
+3. {command}`gulp` requires `HOME` to refer to a writeable directory.
+4. The actual build command in this example is {command}`gulp`. Other tools could be used instead.
+
+## Troubleshooting {#ssec-bower2nix-troubleshooting}
+
+### ENOCACHE errors from buildBowerComponents {#enocache-errors-from-buildbowercomponents}
+
+This means that Bower was looking for a package version which doesn't exist in the generated `bower-packages.nix`.
+
+If `bower.json` has been updated, then run `bower2nix` again.
+
+It could also be a bug in `bower2nix` or `fetchbower`. If possible, try reformulating the version specification in `bower.json`.
diff --git a/nixpkgs/doc/languages-frameworks/chicken.section.md b/nixpkgs/doc/languages-frameworks/chicken.section.md
new file mode 100644
index 000000000000..16b00b3f5b5d
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/chicken.section.md
@@ -0,0 +1,80 @@
+# CHICKEN {#sec-chicken}
+
+[CHICKEN](https://call-cc.org/) is an
+[R⁵RS](https://schemers.org/Documents/Standards/R5RS/HTML/)-compliant Scheme
+compiler. It includes an interactive mode and a custom package format, "eggs".
+
+## Using Eggs {#sec-chicken-using}
+
+Eggs described in nixpkgs are available inside the
+`chickenPackages.chickenEggs` attrset. Including an egg as a build input is
+done in the typical Nix fashion. For example, to include support for [SRFI
+189](https://srfi.schemers.org/srfi-189/srfi-189.html) in a derivation, one
+might write:
+
+```nix
+{
+  buildInputs = [
+    chicken
+    chickenPackages.chickenEggs.srfi-189
+  ];
+}
+```
+
+Both `chicken` and its eggs have a setup hook which configures the environment
+variables `CHICKEN_INCLUDE_PATH` and `CHICKEN_REPOSITORY_PATH`.
+
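+The same mechanism works for a development shell; here is a minimal sketch, with `srfi-189` standing in for whatever eggs you need:
+
+```nix
+with import <nixpkgs> { };
+
+mkShell {
+  # the setup hooks of chicken and of each egg populate
+  # CHICKEN_INCLUDE_PATH and CHICKEN_REPOSITORY_PATH
+  buildInputs = [
+    chicken
+    chickenPackages.chickenEggs.srfi-189
+  ];
+}
+```
+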
+## Updating Eggs {#sec-chicken-updating-eggs}
+
+nixpkgs only knows about a subset of all published eggs. It uses
+[egg2nix](https://github.com/the-kenny/egg2nix) to generate a
+package set from a list of eggs to include.
+
+The package set is regenerated by running the following shell commands:
+
+```ShellSession
+$ nix-shell -p chickenPackages.egg2nix
+$ cd pkgs/development/compilers/chicken/5/
+$ egg2nix eggs.scm > eggs.nix
+```
+
+## Adding Eggs {#sec-chicken-adding-eggs}
+
+When we run `egg2nix`, we obtain one collection of eggs with
+mutually-compatible versions. This means that when we add new eggs, we may
+need to update existing eggs. To keep those separate, follow the procedure for
+updating eggs before including more eggs.
+
+To include more eggs, edit `pkgs/development/compilers/chicken/5/eggs.scm`.
+The first section of this file lists eggs which are required by `egg2nix`
+itself; all other eggs go into the second section. After editing, follow the
+procedure for updating eggs.
+
+## Override Scope {#sec-chicken-override-scope}
+
+The chicken package and its eggs, respectively, reside in a scope. This means
+the scope can be overridden to affect other packages in it.
+
+This example shows how to use a local copy of `srfi-180` and have it affect
+all the other eggs:
+
+```nix
+let
+  myChickenPackages = pkgs.chickenPackages.overrideScope' (self: super: {
+      # The chicken package itself can be overridden to affect the whole ecosystem.
+      # chicken = super.chicken.overrideAttrs {
+      #   src = ...
+      # };
+
+      chickenEggs = super.chickenEggs.overrideScope' (eggself: eggsuper: {
+        srfi-180 = eggsuper.srfi-180.overrideAttrs {
+          # path to a local copy of srfi-180
+          src = <...>;
+        };
+      });
+  });
+in
+# Here, `myChickenPackages.chickenEggs.json-rpc`, which depends on `srfi-180` will use
+# the local copy of `srfi-180`.
+<...>
+```
diff --git a/nixpkgs/doc/languages-frameworks/coq.section.md b/nixpkgs/doc/languages-frameworks/coq.section.md
new file mode 100644
index 000000000000..db3724773345
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/coq.section.md
@@ -0,0 +1,151 @@
+# Coq and coq packages {#sec-language-coq}
+
+## Coq derivation: `coq` {#coq-derivation-coq}
+
+The Coq derivation is overridable through `coq.override overrides`, where `overrides` is an attribute set which contains the arguments to override. We recommend overriding any of the following:
+
+* `version` (optional, defaults to the latest version of Coq selected for nixpkgs, see `pkgs/top-level/coq-packages` to witness this choice), which follows the conventions explained in the `coqPackages` section below,
+* `customOCamlPackages` (optional, defaults to `null`, which lets Coq choose a version automatically), which can be set to any of the ocaml packages attribute of `ocaml-ng` (such as `ocaml-ng.ocamlPackages_4_10` which is the default for Coq 8.11 for example).
+* `coq-version` (optional, defaults to the short version, e.g. "8.10"), is a version number of the form "x.y" that indicates which Coq version's build behavior to mimic when using a source which is not a release. E.g. `coq.override { version = "d370a9d1328a4e1cdb9d02ee032f605a9d94ec7a"; coq-version = "8.10"; }`.
+
+The associated package set can be obtained using `mkCoqPackages coq`, where `coq` is the derivation to use.
+
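+For instance, a sketch of overriding the Coq version and deriving a matching package set (the version string and the `mathcomp` attribute are only illustrative):
+
+```nix
+let
+  myCoq = coq.override { version = "8.16"; };
+  myCoqPackages = mkCoqPackages myCoq;
+in
+  myCoqPackages.mathcomp
+```
+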
+## Coq packages attribute sets: `coqPackages` {#coq-packages-attribute-sets-coqpackages}
+
+The recommended way of defining a derivation for a Coq library is to use the `coqPackages.mkCoqDerivation` function, which is essentially a specialization of `mkDerivation` taking into account most of the specifics of Coq libraries. The following attributes are supported:
+
+* `pname` (required) is the name of the package,
+* `version` (optional, defaults to `null`), is the version to fetch and build,
+  this attribute is interpreted in several ways depending on its type and pattern:
+  * if it is a known released version string, i.e. from the `release` attribute below, the according release is picked, and the `version` attribute of the resulting derivation is set to this release string,
+  * if it is a majorMinor `"x.y"` prefix of a known released version (as defined above), then the latest `"x.y.z"` known released version is selected (for the ordering given by `versionAtLeast`),
+  * if it is a path or a string representing an absolute path (i.e. starting with `"/"`), the provided path is selected as a source, and the `version` attribute of the resulting derivation is set to `"dev"`,
+  * if it is a string of the form `owner:branch` then it tries to download the `branch` of owner `owner` for a project of the same name using the same vcs, and the `version` attribute of the resulting derivation is set to `"dev"`, additionally if the owner is not provided (i.e. if the `owner:` prefix is missing), it defaults to the original owner of the package (see below),
+  * if it is a string of the form `"#N"`, and the domain is github, then it tries to download the current head of the pull request `#N` from github,
+* `defaultVersion` (optional). Coq libraries may be compatible with some specific versions of Coq only. The `defaultVersion` attribute is used when no `version` is provided (or if `version = null`) to select the version of the library to use by default, depending on the context. This selection will mainly depend on a `coq` version number but also possibly on other package versions (e.g. `mathcomp`). If its value ends up being `null`, the package is marked for removal in the end-user `coqPackages` attribute set.
+* `release` (optional, defaults to `{}`), lists all the known releases of the library and for each of them provides an attribute set with at least a `sha256` attribute (you may put the empty string `""` in order to automatically insert a fake sha256, this will trigger an error which will allow you to find the correct sha256), each attribute set of the list of releases also takes optional overloading arguments for the fetcher as below (i.e.`domain`, `owner`, `repo`, `rev` assuming the default fetcher is used) and optional overrides for the result of the fetcher (i.e. `version` and `src`).
+* `fetcher` (optional, defaults to a generic fetching mechanism supporting github or gitlab based infrastructures), is a function that takes at least an `owner`, a `repo`, a `rev`, and a `hash` and returns an attribute set with a `version` and `src`.
+* `repo` (optional, defaults to the value of `pname`),
+* `owner` (optional, defaults to `"coq-community"`).
+* `domain` (optional, defaults to `"github.com"`), domains including the strings `"github"` or `"gitlab"` in their names are automatically supported, otherwise, one must change the `fetcher` argument to support them (cf `pkgs/development/coq-modules/heq/default.nix` for an example),
+* `releaseRev` (optional, defaults to `(v: v)`), provides a default mapping from release names to revision hashes/branch names/tags,
+* `displayVersion` (optional), provides a way to alter the computation of `name` from `pname`, by explaining how to display version numbers,
+* `namePrefix` (optional, defaults to `[ "coq" ]`), provides a way to alter the computation of `name` from `pname`, by explaining which dependencies must occur in `name`,
+* `nativeBuildInputs` (optional), is a list of executables that are required to build the current derivation, in addition to the default ones (namely `which`, `dune` and `ocaml` depending on whether `useDune`, `useDuneifVersion` and `mlPlugin` are set).
+* `extraNativeBuildInputs` (optional, deprecated), an additional list of derivations to add to `nativeBuildInputs`,
+* `overrideNativeBuildInputs` (optional) replaces the default list of derivations to which `nativeBuildInputs` and `extraNativeBuildInputs` add extra elements,
+* `buildInputs` (optional), is a list of libraries and dependencies that are required to build and run the current derivation, in addition to the default one `[ coq ]`,
+* `extraBuildInputs` (optional, deprecated), an additional list of derivations to add to `buildInputs`,
+* `overrideBuildInputs` (optional) replaces the default list of derivations to which `buildInputs` and `extraBuildInputs` add extra elements,
+* `propagatedBuildInputs` (optional) is passed as is to `mkDerivation`, we recommend to use this for Coq libraries and Coq plugin dependencies, as this makes sure the paths of the compiled libraries and plugins will always be added to the build environments of subsequent derivation, which is necessary for Coq packages to work correctly,
+* `mlPlugin` (optional, defaults to `false`). Some extensions (plugins) might require OCaml and sometimes other OCaml packages. Standard dependencies can be added by setting the current option to `true`. For a finer grain control, the `coq.ocamlPackages` attribute can be used in `nativeBuildInputs`, `buildInputs`, and `propagatedBuildInputs` to depend on the same package set Coq was built against.
+* `useDuneifVersion` (optional, defaults to `(x: false)`) uses Dune to build the package if the provided predicate evaluates to true on the version, e.g. `useDuneifVersion = versions.isGe "1.1"` will use Dune if the version of the package is greater than or equal to `"1.1"`,
+* `useDune` (optional, defaults to `false`) uses Dune to build the package if set to true, the presence of this attribute overrides the behavior of the previous one.
+* `opam-name` (optional, defaults to concatenating with a dash separator the components of `namePrefix` and `pname`), name of the Dune package to build.
+* `enableParallelBuilding` (optional, defaults to `true`), since it is activated by default, we provide a way to disable it.
+* `extraInstallFlags` (optional), allows extending `installFlags`, which initializes the variable `COQMF_COQLIB` so as to install in the proper subdirectory. Indeed Coq libraries should be installed in `$(out)/lib/coq/${coq.coq-version}/user-contrib/`. Such directories are automatically added to the `$COQPATH` environment variable by the hook defined in the Coq derivation.
+* `setCOQBIN` (optional, defaults to `true`), by default, the environment variable `$COQBIN` is set to the current Coq's binary, but one can disable this behavior by setting it to `false`,
+* `useMelquiondRemake` (optional, defaults to `null`) is an attribute set, which, if given, overloads the `preConfigurePhases`, `configureFlags`, `buildPhase`, and `installPhase` attributes of the derivation for a specific use in libraries using `remake` as set up by Guillaume Melquiond for `flocq`, `gappalib`, `interval`, and `coquelicot` (see the corresponding derivation for concrete examples of use of this option). For backward compatibility, the attribute `useMelquiondRemake.logpath` must be set to the logical root of the library (otherwise, one can pass `useMelquiondRemake = {}` to activate this without backward compatibility).
+* `dropAttrs`, `keepAttrs`, `dropDerivationAttrs` are all optional and allow tuning which attributes are added or removed from the final call to `mkDerivation`.
+
+It also takes other standard `mkDerivation` attributes; they are added as such, except for `meta`, which extends an automatically computed `meta` (where the `platform` is the same as that of `coq` and the homepage is automatically computed).
+
+Here is a simple package example. It is a pure Coq library, thus it depends on Coq. It builds on the Mathematical Components library, thus it also takes some `mathcomp` derivations as `propagatedBuildInputs`.
+
+```nix
+{ lib, mkCoqDerivation, version ? null
+, coq, mathcomp, mathcomp-finmap, mathcomp-bigenough }:
+
+let
+  inherit (lib) licenses maintainers switch;
+  inherit (lib.versions) range;
+in
+
+mkCoqDerivation {
+  /* namePrefix leads to e.g. `name = coq8.11-mathcomp1.11-multinomials-1.5.2` */
+  namePrefix = [ "coq" "mathcomp" ];
+  pname = "multinomials";
+  owner = "math-comp";
+  inherit version;
+  defaultVersion = switch [ coq.version mathcomp.version ] [
+      { cases = [ (range "8.7" "8.12")  "1.11.0" ];             out = "1.5.2"; }
+      { cases = [ (range "8.7" "8.11")  (range "1.8" "1.10") ]; out = "1.5.0"; }
+      { cases = [ (range "8.7" "8.10")  (range "1.8" "1.10") ]; out = "1.4"; }
+      { cases = [ "8.6"                 (range "1.6" "1.7") ];  out = "1.1"; }
+    ] null;
+  release = {
+    "1.5.2".sha256 = "15aspf3jfykp1xgsxf8knqkxv8aav2p39c2fyirw7pwsfbsv2c4s";
+    "1.5.1".sha256 = "13nlfm2wqripaq671gakz5mn4r0xwm0646araxv0nh455p9ndjs3";
+    "1.5.0".sha256 = "064rvc0x5g7y1a0nip6ic91vzmq52alf6in2bc2dmss6dmzv90hw";
+    "1.5.0".rev    = "1.5";
+    "1.4".sha256   = "0vnkirs8iqsv8s59yx1fvg1nkwnzydl42z3scya1xp1b48qkgn0p";
+    "1.3".sha256   = "0l3vi5n094nx3qmy66hsv867fnqm196r8v605kpk24gl0aa57wh4";
+    "1.2".sha256   = "1mh1w339dslgv4f810xr1b8v2w7rpx6fgk9pz96q0fyq49fw2xcq";
+    "1.1".sha256   = "1q8alsm89wkc0lhcvxlyn0pd8rbl2nnxg81zyrabpz610qqjqc3s";
+    "1.0".sha256   = "1qmbxp1h81cy3imh627pznmng0kvv37k4hrwi2faa101s6bcx55m";
+  };
+
+  propagatedBuildInputs =
+    [ mathcomp.ssreflect mathcomp.algebra mathcomp-finmap mathcomp-bigenough ];
+
+  meta = {
+    description = "A Coq/SSReflect Library for Monoidal Rings and Multinomials";
+    license = licenses.cecill-c;
+  };
+}
+```
+
+## Three ways of overriding Coq packages {#coq-overriding-packages}
+
+There are three distinct ways of changing a Coq package by overriding one of its values: `.override`, `overrideCoqDerivation`, and `.overrideAttrs`.  This section explains what sort of values can be overridden with each of these methods.
+
+### `.override` {#coq-override}
+
+`.override` lets you change arguments to a Coq derivation.  In the case of the `multinomials` package above, `.override` would let you override arguments like `mkCoqDerivation`, `version`, `coq`, `mathcomp`, `mathcomp-finmap`, etc.
+
+For example, assuming you have a special `mathcomp` dependency you want to use, here is how you could override the `mathcomp` dependency:
+
+```nix
+multinomials.override {
+  mathcomp = my-special-mathcomp;
+}
+```
+
+In Nixpkgs, all Coq derivations take a `version` argument.  This can be overridden in order to easily use a different version:
+
+```nix
+coqPackages.multinomials.override {
+  version = "1.5.1";
+}
+```
+
+Refer to [](#coq-packages-attribute-sets-coqpackages) for all the different formats that you can potentially pass to `version`, as well as the restrictions.
+
+### `overrideCoqDerivation` {#coq-overrideCoqDerivation}
+
+The `overrideCoqDerivation` function lets you easily change arguments to `mkCoqDerivation`.  These arguments are described in [](#coq-packages-attribute-sets-coqpackages).
+
+For example, here is how you could locally add a new release of the `multinomials` library, and set the `defaultVersion` to use this release:
+
+```nix
+coqPackages.lib.overrideCoqDerivation
+  {
+    defaultVersion = "2.0";
+    release."2.0".sha256 = "1lq8x86vd3vqqh2yq6hvyagpnhfq5wmk5pg2z0xq7b7dbbbhyfkk";
+  }
+  coqPackages.multinomials
+```
+
+### `.overrideAttrs` {#coq-overrideAttrs}
+
+`.overrideAttrs` lets you override arguments to the underlying `stdenv.mkDerivation` call. Internally, `mkCoqDerivation` uses `stdenv.mkDerivation` to create derivations for Coq libraries.  You can override arguments to `stdenv.mkDerivation` with `.overrideAttrs`.
+
+For instance, here is how you could add some code to be performed in the derivation after installation is complete:
+
+```nix
+coqPackages.multinomials.overrideAttrs (oldAttrs: {
+  postInstall = oldAttrs.postInstall or "" + ''
+    echo "you can do anything you want here"
+  '';
+})
+```
diff --git a/nixpkgs/doc/languages-frameworks/crystal.section.md b/nixpkgs/doc/languages-frameworks/crystal.section.md
new file mode 100644
index 000000000000..9953f357048a
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/crystal.section.md
@@ -0,0 +1,77 @@
+# Crystal {#crystal}
+
+## Building a Crystal package {#building-a-crystal-package}
+
+This section uses [Mint](https://github.com/mint-lang/mint) as an example for how to build a Crystal package.
+
+If the Crystal project has any dependencies, the first step is to get a `shards.nix` file encoding those. Get a copy of the project and go to its root directory such that its `shard.lock` file is in the current directory. Executable projects should usually commit the `shard.lock` file, but sometimes that's not the case, which means you need to generate it yourself. With an existing `shard.lock` file, `crystal2nix` can be run.
+```bash
+$ git clone https://github.com/mint-lang/mint
+$ cd mint
+$ git checkout 0.5.0
+$ if [ ! -f shard.lock ]; then nix-shell -p shards --run "shards lock"; fi
+$ nix-shell -p crystal2nix --run crystal2nix
+```
+
+This should have generated a `shards.nix` file.
+
+Next create a Nix file for your derivation and use `pkgs.crystal.buildCrystalPackage` as follows:
+
+```nix
+with import <nixpkgs> {};
+crystal.buildCrystalPackage rec {
+  pname = "mint";
+  version = "0.5.0";
+
+  src = fetchFromGitHub {
+    owner = "mint-lang";
+    repo = "mint";
+    rev = version;
+    hash = "sha256-dFN9l5fgrM/TtOPqlQvUYgixE4KPr629aBmkwdDoq28=";
+  };
+
+  # Insert the path to your shards.nix file here
+  shardsFile = ./shards.nix;
+
+  # ...
+}
+```
+
+This won't build anything yet, because we haven't told it what files to build. We can specify a mapping from binary names to source files with the `crystalBinaries` attribute. The project's compilation instructions should show this. For Mint, the binary is called "mint", which is compiled from the source file `src/mint.cr`, so we'll specify this as follows:
+
+```nix
+{
+  crystalBinaries.mint.src = "src/mint.cr";
+
+  # ...
+}
+```
+
+Additionally, you can override the default `crystal build` options (which are currently `--release --progress --no-debug --verbose`) with:
+
+```nix
+{
+  crystalBinaries.mint.options = [ "--release" "--verbose" ];
+}
+```
+
+Depending on the project, you might need additional steps to get it to compile successfully. In Mint's case, we need to link against openssl, so in the end the Nix file looks as follows:
+
+```nix
+with import <nixpkgs> {};
+crystal.buildCrystalPackage rec {
+  version = "0.5.0";
+  pname = "mint";
+  src = fetchFromGitHub {
+    owner = "mint-lang";
+    repo = "mint";
+    rev = version;
+    hash = "sha256-dFN9l5fgrM/TtOPqlQvUYgixE4KPr629aBmkwdDoq28=";
+  };
+
+  shardsFile = ./shards.nix;
+  crystalBinaries.mint.src = "src/mint.cr";
+
+  buildInputs = [ openssl ];
+}
+```
diff --git a/nixpkgs/doc/languages-frameworks/cuda.section.md b/nixpkgs/doc/languages-frameworks/cuda.section.md
new file mode 100644
index 000000000000..9791018c7f5f
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/cuda.section.md
@@ -0,0 +1,151 @@
+# CUDA {#cuda}
+
+CUDA-only packages are stored in the `cudaPackages` packages set. This set
+includes the `cudatoolkit`, portions of the toolkit in separate derivations,
+`cudnn`, `cutensor` and `nccl`.
+
+A package set is available for each CUDA version, so for example
+`cudaPackages_11_6`. Within each set is a matching version of the above listed
+packages. Additionally, other versions of the packages that are packaged and
+compatible are available as well. For example, there can be a
+`cudaPackages.cudnn_8_3` package.
+
+To use one or more CUDA packages in an expression, give the expression a `cudaPackages` parameter, and in case CUDA is optional:
+```nix
+{ config
+, cudaSupport ? config.cudaSupport
+, cudaPackages ? { }
+, ...
+}: {}
+```
+
+When using `callPackage`, you can choose to pass in a different variant, e.g.
+when a different version of the toolkit suffices:
+```nix
+{
+  # assuming ./mypkg.nix contains the package expression
+  mypkg = callPackage ./mypkg.nix { cudaPackages = cudaPackages_11_5; };
+}
+```
+
+If another version of say `cudnn` or `cutensor` is needed, you can override the
+package set to make it the default. This guarantees you get a consistent package
+set.
+```nix
+{
+  mypkg = let
+    cudaPackages = cudaPackages_11_5.overrideScope (final: prev: {
+      cudnn = prev.cudnn_8_3;
+    });
+  in callPackage ./mypkg.nix { inherit cudaPackages; }; # same hypothetical ./mypkg.nix as above
+}
+```
+
+The CUDA NVCC compiler requires flags to determine which hardware you
+want to target in terms of SASS (real hardware) or PTX (JIT kernels).
+
+Nixpkgs tries to support sensible real-architecture defaults based on the
+CUDA toolkit version, with PTX support for future hardware.  Experienced
+users may optimize this configuration for a variety of reasons such as
+reducing binary size and compile time, supporting legacy hardware, or
+optimizing for specific hardware.
+
+You may provide capabilities to add support or reduce binary size through
+`config` using `cudaCapabilities = [ "6.0" "7.0" ];` and
+`cudaForwardCompat = true;` if you want PTX support for future hardware.
+
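+For instance, a sketch of what this could look like when importing Nixpkgs (the capability values are illustrative; pick the ones matching your GPUs):
+
+```nix
+import <nixpkgs> {
+  config = {
+    allowUnfree = true; # CUDA packages are unfree
+    cudaSupport = true;
+    cudaCapabilities = [ "7.5" "8.6" ];
+    cudaForwardCompat = true;
+  };
+}
+```
+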
+Please consult [GPUs supported](https://en.wikipedia.org/wiki/CUDA#GPUs_supported)
+for your specific card(s).
+
+Library maintainers should consult [NVCC Docs](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/)
+and release notes for their software package.
+
+## Adding a new CUDA release {#adding-a-new-cuda-release}
+
+> **WARNING**
+>
+> This section of the docs is still very much in progress. Feedback is welcome in GitHub Issues tagging @NixOS/cuda-maintainers or on [Matrix](https://matrix.to/#/#cuda:nixos.org).
+
+The CUDA Toolkit is a suite of CUDA libraries and software meant to provide a development environment for CUDA-accelerated applications. Until the release of CUDA 11.4, NVIDIA had only made the CUDA Toolkit available as a multi-gigabyte runfile installer, which we provide through the [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit) attribute. From CUDA 11.4 and onwards, NVIDIA has also provided CUDA redistributables (“CUDA-redist”): individually packaged CUDA Toolkit components meant to facilitate redistribution and inclusion in downstream projects. These packages are available in the [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) package set.
+
+All new projects should use the CUDA redistributables available in [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) in place of [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit), as they are much easier to maintain and update.
+
+### Updating CUDA redistributables {#updating-cuda-redistributables}
+
+1. Go to NVIDIA's index of CUDA redistributables: <https://developer.download.nvidia.com/compute/cuda/redist/>
+2. Make a note of the new version of CUDA available.
+3. Run
+
+   ```bash
+   nix run github:connorbaker/cuda-redist-find-features -- \
+      download-manifests \
+      --log-level DEBUG \
+      --version <newest CUDA version> \
+      https://developer.download.nvidia.com/compute/cuda/redist \
+      ./pkgs/development/cuda-modules/cuda/manifests
+   ```
+
+   This will download a copy of the manifest for the new version of CUDA.
+4. Run
+
+   ```bash
+   nix run github:connorbaker/cuda-redist-find-features -- \
+      process-manifests \
+      --log-level DEBUG \
+      --version <newest CUDA version> \
+      https://developer.download.nvidia.com/compute/cuda/redist \
+      ./pkgs/development/cuda-modules/cuda/manifests
+   ```
+
+   This will generate a `redistrib_features_<newest CUDA version>.json` file in the same directory as the manifest.
+5. Update the `cudaVersionMap` attribute set in `pkgs/development/cuda-modules/cuda/extension.nix`.
+
+### Updating cuTensor {#updating-cutensor}
+
+1. Repeat the steps present in [Updating CUDA redistributables](#updating-cuda-redistributables) with the following changes:
+   - Use the index of cuTensor redistributables: <https://developer.download.nvidia.com/compute/cutensor/redist>
+   - Use the newest version of cuTensor available instead of the newest version of CUDA.
+   - Use `pkgs/development/cuda-modules/cutensor/manifests` instead of `pkgs/development/cuda-modules/cuda/manifests`.
+   - Skip the step of updating `cudaVersionMap` in `pkgs/development/cuda-modules/cuda/extension.nix`.
+
+### Updating supported compilers and GPUs {#updating-supported-compilers-and-gpus}
+
+1. Update `nvcc-compatibilities.nix` in `pkgs/development/cuda-modules/` to include the newest release of NVCC, as well as any newly supported host compilers.
+2. Update `gpus.nix` in `pkgs/development/cuda-modules/` to include any new GPUs supported by the new release of CUDA.
+
+### Updating the CUDA Toolkit runfile installer {#updating-the-cuda-toolkit}
+
+> **WARNING**
+>
+> While the CUDA Toolkit runfile installer is still available in Nixpkgs as the [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit) attribute, its use is not recommended and it should be considered deprecated. Please migrate to the CUDA redistributables provided by the [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) package set.
+>
+> To ensure packages relying on the CUDA Toolkit runfile installer continue to build, it will continue to be updated until a migration path is available.
+
+1. Go to NVIDIA's CUDA Toolkit runfile installer download page: <https://developer.nvidia.com/cuda-downloads>
+2. Select the appropriate OS, architecture, distribution, version, and installer type.
+
+   - For example: Linux, x86_64, Ubuntu, 22.04, runfile (local)
+   - NOTE: Typically, we use the Ubuntu runfile. It is unclear if the runfile for other distributions will work.
+
+3. Take the link provided by the installer instructions on the webpage after selecting the installer type and get its hash by running:
+
+   ```bash
+   nix store prefetch-file --hash-type sha256 <link>
+   ```
+
+4. Update `pkgs/development/cuda-modules/cudatoolkit/releases.nix` to include the release.
+
+### Updating the CUDA package set {#updating-the-cuda-package-set}
+
+1. Include a new `cudaPackages_<major>_<minor>` package set in `pkgs/top-level/all-packages.nix`.
+
+   - NOTE: Changing the default CUDA package set should occur in a separate PR, allowing time for additional testing.
+
+2. Successfully build the closure of the new package set, updating `pkgs/development/cuda-modules/cuda/overrides.nix` as needed. Below are some common failures:
+
+| Unable to ... | During ... | Reason | Solution | Note |
+| --- | --- | --- | --- | --- |
+| Find headers | `configurePhase` or `buildPhase` | Missing dependency on a `dev` output | Add the missing dependency | The `dev` output typically contains the headers |
+| Find libraries | `configurePhase` | Missing dependency on a `dev` output | Add the missing dependency | The `dev` output typically contains CMake configuration files |
+| Find libraries | `buildPhase` or `patchelf` | Missing dependency on a `lib` or `static` output | Add the missing dependency | The `lib` or `static` output typically contains the libraries |
+
+In the scenario you are unable to run the resulting binary: this is arguably the most complicated as it could be any combination of the previous reasons. This type of failure typically occurs when a library attempts to load or open a library it depends on that it does not declare in its `DT_NEEDED` section. As a first step, ensure that dependencies are patched with [`cudaPackages.autoAddDriverRunpath`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.autoAddDriverRunpath). Failing that, try running the application with [`nixGL`](https://github.com/guibou/nixGL) or a similar wrapper tool. If that works, it likely means that the application is attempting to load a library that is not in the `RPATH` or `RUNPATH` of the binary.
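+
+As a reference point, a minimal sketch of adding that hook to a downstream derivation, assuming `cudaPackages` is in scope:
+
+```nix
+{
+  # setup hook that patches binaries so they can locate the NVIDIA driver libraries at runtime
+  nativeBuildInputs = [ cudaPackages.autoAddDriverRunpath ];
+}
+```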
diff --git a/nixpkgs/doc/languages-frameworks/cuelang.section.md b/nixpkgs/doc/languages-frameworks/cuelang.section.md
new file mode 100644
index 000000000000..70329b15fd7d
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/cuelang.section.md
@@ -0,0 +1,93 @@
+# Cue (Cuelang) {#cuelang}
+
+[Cuelang](https://cuelang.org/) is a language to:
+
+- describe schemas and validate backward-compatibility
+- generate code and schemas in various formats (e.g. JSON Schema, OpenAPI)
+- do configuration akin to [Dhall Lang](https://dhall-lang.org/)
+- perform data validation
+
+## Cuelang schema quick start {#cuelang-quickstart}
+
+Cuelang schemas are similar to JSON; here is a quick cheatsheet:
+
+- Default types include: `null`, `string`, `bool`, `bytes`, `number`, `int`, `float`, lists as `[...T]` where `T` is a type.
+- All structures, defined by `myStructName: { <fields> }`, are **open** -- they accept fields which are not specified.
+- Closed structures can be built by doing `myStructName: close({ <fields> })` -- they are strict in what they accept.
+- `#X` are **definitions**, referenced definitions are **recursively closed**, i.e. all its children structures are **closed**.
+- `&` operator is the [unification operator](https://cuelang.org/docs/references/spec/#unification) (similar to a type-level merging operator), `|` is the [disjunction operator](https://cuelang.org/docs/references/spec/#disjunction) (similar to a type-level union operator).
+- Values **are** types, i.e. `myStruct: { a: 3 }` is a valid type definition that only allows `3` as value.
+
+- Read <https://cuelang.org/docs/concepts/logic/> to learn more about the semantics.
+- Read <https://cuelang.org/docs/references/spec/> to learn about the language specification.
+
+## `writeCueValidator` {#cuelang-writeCueValidator}
+
+Nixpkgs provides a `pkgs.writeCueValidator` helper, which will write a validation script based on the provided Cuelang schema.
+
+Here is an example:
+```nix
+pkgs.writeCueValidator
+  (pkgs.writeText "schema.cue" ''
+    #Def1: {
+      field1: string
+    }
+  '')
+  { document = "#Def1"; }
+```
+
+- The first parameter is the Cue schema file.
+- The second parameter is an options attribute set; currently, only `document` can be passed.
+
+`document`: match your input data against this fragment of structure or definition, e.g. you may use the same schema file but different documents based on the data you are validating.
+
+Another example, given the following `validator.nix`:
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+let
+  genericValidator = version:
+  pkgs.writeCueValidator
+    (pkgs.writeText "schema.cue" ''
+      #Version1: {
+        field1: string
+      }
+      #Version2: #Version1 & {
+        field1: "unused"
+      }''
+    )
+    { document = "#Version${toString version}"; };
+in
+{
+  validateV1 = genericValidator 1;
+  validateV2 = genericValidator 2;
+}
+```
+
+The result is a script that will validate the file you pass as the first argument against the schema you provided to `writeCueValidator`.
+
+It can be in any format that `cue vet` supports, e.g. YAML or JSON.
+
+For example, given the following JSON in a file named `example.json`:
+```json
+{ "field1": "abc" }
+```
+
+You can then run the resulting scripts (the `result` symlinks produced by `nix-build`) as follows:
+
+```console
+$ nix-build validator.nix
+$ ./result example.json
+$ ./result-2 example.json
+field1: conflicting values "unused" and "abc":
+    ./example.json:1:13
+    ../../../../../../nix/store/v64dzx3vr3glpk0cq4hzmh450lrwh6sg-schema.cue:5:11
+$ sed -i 's/"abc"/3/' example.json
+$ ./result example.json
+field1: conflicting values 3 and string (mismatched types int and string):
+    ./example.json:1:13
+    ../../../../../../nix/store/v64dzx3vr3glpk0cq4hzmh450lrwh6sg-schema.cue:5:11
+```
+
+**Known limitations**
+
+* The script will enforce **concrete** values and will not accept lossy transformations (strictness). You can add these options if you need them.
diff --git a/nixpkgs/doc/languages-frameworks/dart.section.md b/nixpkgs/doc/languages-frameworks/dart.section.md
new file mode 100644
index 000000000000..019765f75354
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/dart.section.md
@@ -0,0 +1,136 @@
+# Dart {#sec-language-dart}
+
+## Dart applications {#ssec-dart-applications}
+
+The function `buildDartApplication` builds Dart applications managed with pub.
+
+It fetches its Dart dependencies automatically through `pub2nix`, and (through a series of hooks) builds and installs the executables specified in the pubspec file. The hooks can be used in other derivations, if needed. The phases can also be overridden to do something different from installing binaries.
+
+If you are packaging a Flutter desktop application, use [`buildFlutterApplication`](#ssec-dart-flutter) instead.
+
+`pubspecLock` is the parsed `pubspec.lock` file. pub2nix uses this to download required packages.
+This can be converted to JSON from YAML with something like `yq . pubspec.lock`, and then read by Nix.
+
+Alternatively, `autoPubspecLock` can be used instead, set to the path of a regular `pubspec.lock` file. This relies on import-from-derivation, and is not permitted in Nixpkgs, but can be useful at other times.
+
+::: {.warning}
+When using `autoPubspecLock` with a local source directory, make sure to use a
+concatenation operator (e.g. `autoPubspecLock = src + "/pubspec.lock";`), and
+not string interpolation.
+
+String interpolation will copy your entire source directory to the Nix store and
+use its store path, meaning that unrelated changes to your source tree will
+cause the generated `pubspec.lock` derivation to rebuild!
+:::
+
+If the package has Git package dependencies, the hashes must be provided in the `gitHashes` set. If a hash is missing, an error message prompting you to add it will be shown.
+
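+For instance, a sketch of what that could look like (`some_git_package` is a hypothetical dependency name; the error message tells you the exact attribute name and hash to add):
+
+```nix
+{
+  gitHashes = {
+    # hypothetical Git dependency from pubspec.lock; replace lib.fakeHash with the real hash
+    some_git_package = lib.fakeHash;
+  };
+}
+```
+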
+The `dart` commands that are run can be overridden through `pubGetScript` and `dartCompileCommand`; you can also add flags using `dartCompileFlags` or `dartJitFlags`.
+
+Dart supports multiple [output types](https://dart.dev/tools/dart-compile#types-of-output); you can choose between them using `dartOutputType` (defaults to `exe`). If you want to override the binaries path or the source path they come from, you can use `dartEntryPoints`. Outputs that require a runtime will automatically be wrapped with the relevant runtime (`dartaotruntime` for `aot-snapshot`, `dart run` for `jit-snapshot` and `kernel`, `node` for `js`); this can be overridden through `dartRuntimeCommand`.
+
+```nix
+{ lib, buildDartApplication, fetchFromGitHub }:
+
+buildDartApplication rec {
+  pname = "dart-sass";
+  version = "1.62.1";
+
+  src = fetchFromGitHub {
+    owner = "sass";
+    repo = pname;
+    rev = version;
+    hash = "sha256-U6enz8yJcc4Wf8m54eYIAnVg/jsGi247Wy8lp1r1wg4=";
+  };
+
+  pubspecLock = lib.importJSON ./pubspec.lock.json;
+}
+```
+
+### Patching dependencies {#ssec-dart-applications-patching-dependencies}
+
+Some Dart packages require patches or build environment changes. Package derivations can be customised with the `customSourceBuilders` argument.
+
+A collection of such customisations can be found in Nixpkgs, in the `development/compilers/dart/package-source-builders` directory.
+
+This allows fixes for packages to be shared between all applications that use them. It is strongly recommended to add to this collection instead of including fixes in your application derivation itself.
+
+### Running executables from dev_dependencies {#ssec-dart-applications-build-tools}
+
+Many Dart applications require executables from the `dev_dependencies` section in `pubspec.yaml` to be run before building them.
+
+This can be done in `preBuild`, in one of two ways:
+
+1. Packaging the tool with `buildDartApplication`, adding it to Nixpkgs, and running it like any other application
+2. Running the tool from the package cache
+
+Of these methods, the first is recommended when using a tool that does not need
+to be of a specific version.
+
+For the second method, the `packageRun` function from the `dartConfigHook` can be used.
+This is an alternative to `dart run` that does not rely on Pub.
+
+e.g., for `build_runner`:
+
+```bash
+packageRun build_runner build
+```
+
+Do _not_ use `dart run <package_name>`, as this will attempt to download dependencies with Pub.
+
+### Usage with nix-shell {#ssec-dart-applications-nix-shell}
+
+#### Using dependencies from the Nix store {#ssec-dart-applications-nix-shell-deps}
+
+As `buildDartApplication` provides dependencies instead of `pub get`, Dart needs to be explicitly told where to find them.
+
+Run the following commands in the source directory to configure Dart appropriately.
+Do not use `pub` after doing so; it will download the dependencies itself and overwrite these changes.
+
+```bash
+cp --no-preserve=all "$pubspecLockFilePath" pubspec.lock
+mkdir -p .dart_tool && cp --no-preserve=all "$packageConfig" .dart_tool/package_config.json
+```
+
+## Flutter applications {#ssec-dart-flutter}
+
+The function `buildFlutterApplication` builds Flutter applications.
+
+See the [Dart documentation](#ssec-dart-applications) for more details on required files and arguments.
+
+```nix
+{ flutter, fetchFromGitHub }:
+
+flutter.buildFlutterApplication {
+  pname = "firmware-updater";
+  version = "0-unstable-2023-04-30";
+
+  # To build for the Web, use the targetFlutterPlatform argument.
+  # targetFlutterPlatform = "web";
+
+  src = fetchFromGitHub {
+    owner = "canonical";
+    repo = "firmware-updater";
+    rev = "6e7dbdb64e344633ea62874b54ff3990bd3b8440";
+    sha256 = "sha256-s5mwtr5MSPqLMN+k851+pFIFFPa0N1hqz97ys050tFA=";
+    fetchSubmodules = true;
+  };
+
+  pubspecLock = lib.importJSON ./pubspec.lock.json;
+}
+```
+
+### Usage with nix-shell {#ssec-dart-flutter-nix-shell}
+
+Flutter-specific `nix-shell` usage notes are included here. See the [Dart documentation](#ssec-dart-applications-nix-shell) for general `nix-shell` instructions.
+
+#### Entering the shell {#ssec-dart-flutter-nix-shell-enter}
+
+By default, dependencies for only the `targetFlutterPlatform` are available in the
+build environment. This is useful for keeping closures small, but can be problematic
+during development. It's common, for example, to build Web apps for Linux during
+development to take advantage of native features such as stateful hot reload.
+
+To enter a shell with all the usual target platforms available, use the `multiShell` attribute.
+
+e.g. `nix-shell '<nixpkgs>' -A fluffychat-web.multiShell`.
diff --git a/nixpkgs/doc/languages-frameworks/dhall.section.md b/nixpkgs/doc/languages-frameworks/dhall.section.md
new file mode 100644
index 000000000000..8d85c9f1daf7
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/dhall.section.md
@@ -0,0 +1,472 @@
+# Dhall {#sec-language-dhall}
+
+The Nixpkgs support for Dhall assumes some familiarity with Dhall's language
+support for importing Dhall expressions, which is documented here:
+
+* [`dhall-lang.org` - Installing packages](https://docs.dhall-lang.org/tutorials/Language-Tour.html#installing-packages)
+
+## Remote imports {#ssec-dhall-remote-imports}
+
+Nixpkgs bypasses Dhall's support for remote imports using Dhall's
+semantic integrity checks.  Specifically, any Dhall import can be protected by
+an integrity check like:
+
+```dhall
+https://prelude.dhall-lang.org/v20.1.0/package.dhall
+  sha256:26b0ef498663d269e4dc6a82b0ee289ec565d683ef4c00d0ebdd25333a5a3c98
+```
+
+… and if the import is cached then the interpreter will load the import from
+cache instead of fetching the URL.
+
+Nixpkgs uses this trick to add all of a Dhall expression's dependencies into the
+cache so that the Dhall interpreter never needs to resolve any remote URLs.  In
+fact, Nixpkgs uses a Dhall interpreter with remote imports disabled when
+packaging Dhall expressions to enforce that the interpreter never resolves a
+remote import.  This means that Nixpkgs only supports building Dhall expressions
+if all of their remote imports are protected by semantic integrity checks.
+
+Instead of remote imports, Nixpkgs uses Nix to fetch remote Dhall code.  For
+example, the Prelude Dhall package uses `pkgs.fetchFromGitHub` to fetch the
+`dhall-lang` repository containing the Prelude.  Relying exclusively on Nix
+to fetch Dhall code ensures that Dhall packages built using Nix remain pure and
+also behave well when built within a sandbox.
+
+## Packaging a Dhall expression from scratch {#ssec-dhall-packaging-expression}
+
+We can illustrate how Nixpkgs integrates Dhall by beginning from the following
+trivial Dhall expression with one dependency (the Prelude):
+
+```dhall
+-- ./true.dhall
+
+let Prelude = https://prelude.dhall-lang.org/v20.1.0/package.dhall
+
+in  Prelude.Bool.not False
+```
+
+As written, this expression cannot be built using Nixpkgs because the
+expression does not protect the Prelude import with a semantic integrity
+check, so the first step is to freeze the expression using `dhall freeze`,
+like this:
+
+```ShellSession
+$ dhall freeze --inplace ./true.dhall
+```
+
+… which gives us:
+
+```dhall
+-- ./true.dhall
+
+let Prelude =
+      https://prelude.dhall-lang.org/v20.1.0/package.dhall
+        sha256:26b0ef498663d269e4dc6a82b0ee289ec565d683ef4c00d0ebdd25333a5a3c98
+
+in  Prelude.Bool.not False
+```
+
+To package that expression, we create a `./true.nix` file containing the
+following specification for the Dhall package:
+
+```nix
+# ./true.nix
+
+{ buildDhallPackage, Prelude }:
+
+buildDhallPackage {
+  name = "true";
+  code = ./true.dhall;
+  dependencies = [ Prelude ];
+  source = true;
+}
+```
+
+… and we complete the build by incorporating that Dhall package into the
+`pkgs.dhallPackages` hierarchy using an overlay, like this:
+
+```nix
+# ./example.nix
+
+let
+  nixpkgs = builtins.fetchTarball {
+    url    = "https://github.com/NixOS/nixpkgs/archive/94b2848559b12a8ed1fe433084686b2a81123c99.tar.gz";
+    hash = "sha256-B4Q3c6IvTLg3Q92qYa8y+i4uTaphtFdjp+Ir3QQjdN0=";
+  };
+
+  dhallOverlay = self: super: {
+    true = self.callPackage ./true.nix { };
+  };
+
+  overlay = self: super: {
+    dhallPackages = super.dhallPackages.override (old: {
+      overrides =
+        self.lib.composeExtensions (old.overrides or (_: _: {})) dhallOverlay;
+    });
+  };
+
+  pkgs = import nixpkgs { config = {}; overlays = [ overlay ]; };
+
+in
+  pkgs
+```
+
+… which we can then build using this command:
+
+```ShellSession
+$ nix build --file ./example.nix dhallPackages.true
+```
+
+## Contents of a Dhall package {#ssec-dhall-package-contents}
+
+The above package produces the following directory tree:
+
+```ShellSession
+$ tree -a ./result
+result
+├── .cache
+│   └── dhall
+│       └── 122027abdeddfe8503496adeb623466caa47da5f63abd2bc6fa19f6cfcb73ecfed70
+├── binary.dhall
+└── source.dhall
+```
+
+… where:
+
+* `source.dhall` contains the result of interpreting our Dhall package:
+
+  ```ShellSession
+  $ cat ./result/source.dhall
+  True
+  ```
+
+* The `.cache` subdirectory contains one binary cache product encoding the
+  same result as `source.dhall`:
+
+  ```ShellSession
+  $ dhall decode < ./result/.cache/dhall/122027abdeddfe8503496adeb623466caa47da5f63abd2bc6fa19f6cfcb73ecfed70
+  True
+  ```
+
+* `binary.dhall` contains a Dhall expression which handles fetching and decoding
+  the same cache product:
+
+  ```ShellSession
+  $ cat ./result/binary.dhall
+  missing sha256:27abdeddfe8503496adeb623466caa47da5f63abd2bc6fa19f6cfcb73ecfed70
+  $ cp -r ./result/.cache .cache
+
+  $ chmod -R u+w .cache
+
+  $ XDG_CACHE_HOME=.cache dhall --file ./result/binary.dhall
+  True
+  ```
+
+The `source.dhall` file is only present for packages that specify
+`source = true;`.  By default, Dhall packages omit the `source.dhall` in order
+to conserve disk space when they are used exclusively as dependencies.  For
+example, if we build the Prelude package it will only contain the binary
+encoding of the expression:
+
+```ShellSession
+$ nix build --file ./example.nix dhallPackages.Prelude
+
+$ tree -a result
+result
+├── .cache
+│   └── dhall
+│       └── 122026b0ef498663d269e4dc6a82b0ee289ec565d683ef4c00d0ebdd25333a5a3c98
+└── binary.dhall
+
+2 directories, 2 files
+```
+
+Typically, you only specify `source = true;` for the top-level Dhall expression
+of interest (such as our example `true.nix` Dhall package).  However, if you
+wish to specify `source = true` for all Dhall packages, then you can amend the
+Dhall overlay like this:
+
+```nix
+{
+  dhallOverrides = self: super: {
+    # Enable source for all Dhall packages
+    buildDhallPackage =
+      args: super.buildDhallPackage (args // { source = true; });
+
+    true = self.callPackage ./true.nix { };
+  };
+}
+```
+
+… and now the Prelude will contain the fully decoded result of interpreting
+the Prelude:
+
+```ShellSession
+$ nix build --file ./example.nix dhallPackages.Prelude
+
+$ tree -a result
+result
+├── .cache
+│   └── dhall
+│       └── 122026b0ef498663d269e4dc6a82b0ee289ec565d683ef4c00d0ebdd25333a5a3c98
+├── binary.dhall
+└── source.dhall
+
+$ cat ./result/source.dhall
+{ Bool =
+  { and =
+      \(_ : List Bool) ->
+        List/fold Bool _ Bool (\(_ : Bool) -> \(_ : Bool) -> _@1 && _) True
+  , build = \(_ : Type -> _ -> _@1 -> _@2) -> _ Bool True False
+  , even =
+      \(_ : List Bool) ->
+        List/fold Bool _ Bool (\(_ : Bool) -> \(_ : Bool) -> _@1 == _) True
+  , fold =
+      \(_ : Bool) ->
+…
+```
+
+## Packaging functions {#ssec-dhall-packaging-functions}
+
+We already saw an example of using `buildDhallPackage` to create a Dhall
+package from a single file, but most Dhall packages consist of more than one
+file and there are two derived utilities that you may find more useful when
+packaging multiple files:
+
+* `buildDhallDirectoryPackage` - build a Dhall package from a local directory
+
+* `buildDhallGitHubPackage` - build a Dhall package from a GitHub repository
+
+The `buildDhallPackage` is the lowest-level function and accepts the following
+arguments:
+
+* `name`: The name of the derivation
+
+* `dependencies`: Dhall dependencies to build and cache ahead of time
+
+* `code`: The top-level expression to build for this package
+
+  Note that the `code` field accepts an arbitrary Dhall expression.  You're
+  not limited to just a file.
+
+* `source`: Set to `true` to include the decoded result as `source.dhall` in the
+  build product, at the expense of requiring more disk space
+
+* `documentationRoot`: Set to the root directory of the package if you want
+  `dhall-docs` to generate documentation underneath the `docs` subdirectory of
+  the build product
+
+The `buildDhallDirectoryPackage` is a higher-level function implemented in terms
+of `buildDhallPackage` that accepts the following arguments:
+
+* `name`: Same as `buildDhallPackage`
+
+* `dependencies`: Same as `buildDhallPackage`
+
+* `source`: Same as `buildDhallPackage`
+
+* `src`: The directory containing Dhall code that you want to turn into a Dhall
+  package
+
+* `file`: The top-level file (`package.dhall` by default) that is the entrypoint
+  to the rest of the package
+
+* `document`: Set to `true` to generate documentation for the package
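+
+For example, a minimal invocation might look like this sketch (the package name,
+source directory, and dependency list are illustrative):
+
+```nix
+{ buildDhallDirectoryPackage, Prelude }:
+
+buildDhallDirectoryPackage {
+  name = "my-package";
+  src = ./dhall;
+  file = "package.dhall";
+  dependencies = [ Prelude ];
+  source = false;
+  document = false;
+}
+```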
+
+The `buildDhallGitHubPackage` is another higher-level function implemented in
+terms of `buildDhallPackage` that accepts the following arguments:
+
+* `name`: Same as `buildDhallPackage`
+
+* `dependencies`: Same as `buildDhallPackage`
+
+* `source`: Same as `buildDhallPackage`
+
+* `owner`: The owner of the repository
+
+* `repo`: The repository name
+
+* `rev`: The desired revision (or branch, or tag)
+
+* `directory`: The subdirectory of the Git repository to package (if a
+  directory other than the root of the repository)
+
+* `file`: The top-level file (`${directory}/package.dhall` by default) that is
+  the entrypoint to the rest of the package
+
+* `document`: Set to `true` to generate documentation for the package
+
+Additionally, `buildDhallGitHubPackage` accepts the same arguments as
+`fetchFromGitHub`, such as `hash` or `fetchSubmodules`.
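+
+For example, a hand-written package for the Prelude might look like the following
+sketch (the revision and hash are placeholders that you would replace with real
+values):
+
+```nix
+{ buildDhallGitHubPackage }:
+
+buildDhallGitHubPackage {
+  name = "Prelude";
+  owner = "dhall-lang";
+  repo = "dhall-lang";
+  rev = "v20.1.0";
+  # Placeholder: replace with the hash reported by Nix on the first build attempt.
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+  directory = "Prelude";
+  file = "package.dhall";
+  dependencies = [ ];
+}
+```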
+
+## `dhall-to-nixpkgs` {#ssec-dhall-dhall-to-nixpkgs}
+
+You can use the `dhall-to-nixpkgs` command-line utility to automate
+packaging Dhall code.  For example:
+
+```ShellSession
+$ nix-shell -p haskellPackages.dhall-nixpkgs nix-prefetch-git
+[nix-shell]$ dhall-to-nixpkgs github https://github.com/Gabriella439/dhall-semver.git
+{ buildDhallGitHubPackage, Prelude }:
+  buildDhallGitHubPackage {
+    name = "dhall-semver";
+    githubBase = "github.com";
+    owner = "Gabriella439";
+    repo = "dhall-semver";
+    rev = "2d44ae605302ce5dc6c657a1216887fbb96392a4";
+    fetchSubmodules = false;
+    hash = "sha256-n0nQtswVapWi/x7or0O3MEYmAkt/a1uvlOtnje6GGnk=";
+    directory = "";
+    file = "package.dhall";
+    source = false;
+    document = false;
+    dependencies = [ (Prelude.overridePackage { file = "package.dhall"; }) ];
+    }
+```
+
+:::{.note}
+`nix-prefetch-git` is added to the `nix-shell -p` invocation above, because it has to be in `$PATH` for `dhall-to-nixpkgs` to work.
+:::
+
+The utility takes care of automatically detecting remote imports and converting
+them to package dependencies.  You can also use the utility on local
+Dhall directories:
+
+```ShellSession
+$ dhall-to-nixpkgs directory ~/proj/dhall-semver
+{ buildDhallDirectoryPackage, Prelude }:
+  buildDhallDirectoryPackage {
+    name = "proj";
+    src = ~/proj/dhall-semver;
+    file = "package.dhall";
+    source = false;
+    document = false;
+    dependencies = [ (Prelude.overridePackage { file = "package.dhall"; }) ];
+    }
+```
+
+### Remote imports as fixed-output derivations {#ssec-dhall-remote-imports-as-fod}
+
+`dhall-to-nixpkgs` has the ability to fetch and build remote imports as
+fixed-output derivations by using their Dhall integrity check. This is
+sometimes easier than manually packaging all remote imports.
+
+This can be used as follows:
+
+```ShellSession
+$ dhall-to-nixpkgs directory --fixed-output-derivations ~/proj/dhall-semver
+{ buildDhallDirectoryPackage, buildDhallUrl }:
+  buildDhallDirectoryPackage {
+    name = "proj";
+    src = ~/proj/dhall-semver;
+    file = "package.dhall";
+    source = false;
+    document = false;
+    dependencies = [
+      (buildDhallUrl {
+        url = "https://prelude.dhall-lang.org/v17.0.0/package.dhall";
+        hash = "sha256-ENs8kZwl6QRoM9+Jeo/+JwHcOQ+giT2VjDQwUkvlpD4=";
+        dhallHash = "sha256:10db3c919c25e9046833df897a8ffe2701dc390fa0893d958c3430524be5a43e";
+        })
+      ];
+    }
+```
+
+Here, `dhall-semver`'s `Prelude` dependency is fetched and built with the
+`buildDhallUrl` helper function, instead of being passed in as a function
+argument.
+
+## Overriding dependency versions {#ssec-dhall-overriding-dependency-versions}
+
+Suppose that we change our `true.dhall` example expression to depend on an older
+version of the Prelude (19.0.0):
+
+```dhall
+-- ./true.dhall
+
+let Prelude =
+      https://prelude.dhall-lang.org/v19.0.0/package.dhall
+        sha256:eb693342eb769f782174157eba9b5924cf8ac6793897fc36a31ccbd6f56dafe2
+
+in  Prelude.Bool.not False
+```
+
+If we try to rebuild that expression the build will fail:
+
+```ShellSession
+$ nix build --file ./example.nix dhallPackages.true
+builder for '/nix/store/0f1hla7ff1wiaqyk1r2ky4wnhnw114fi-true.drv' failed with exit code 1; last 10 log lines:
+
+  Dhall was compiled without the 'with-http' flag.
+
+  The requested URL was: https://prelude.dhall-lang.org/v19.0.0/package.dhall
+
+
+  4│       https://prelude.dhall-lang.org/v19.0.0/package.dhall
+  5│         sha256:eb693342eb769f782174157eba9b5924cf8ac6793897fc36a31ccbd6f56dafe2
+
+  /nix/store/rsab4y99h14912h4zplqx2iizr5n4rc2-true.dhall:4:7
+[1 built (1 failed), 0.0 MiB DL]
+error: build of '/nix/store/0f1hla7ff1wiaqyk1r2ky4wnhnw114fi-true.drv' failed
+```
+
+… because the default Prelude selected by Nixpkgs revision
+`94b2848559b12a8ed1fe433084686b2a81123c99` is version 20.1.0, which doesn't
+have the same integrity check as version 19.0.0.  This means that version
+19.0.0 is not cached and the interpreter is not allowed to fall back to
+importing the URL.
+
+However, we can override the default Prelude version by using `dhall-to-nixpkgs`
+to create a Dhall package for our desired Prelude:
+
+```ShellSession
+$ dhall-to-nixpkgs github https://github.com/dhall-lang/dhall-lang.git \
+    --name Prelude \
+    --directory Prelude \
+    --rev v19.0.0 \
+    > Prelude.nix
+```
+
+… and then referencing that package in our Dhall overlay, by either overriding
+the Prelude globally for all packages, like this:
+
+```nix
+{
+  dhallOverrides = self: super: {
+    true = self.callPackage ./true.nix { };
+
+    Prelude = self.callPackage ./Prelude.nix { };
+  };
+}
+```
+
+… or selectively overriding the Prelude dependency for just the `true` package,
+like this:
+
+```nix
+{
+  dhallOverrides = self: super: {
+    true = self.callPackage ./true.nix {
+      Prelude = self.callPackage ./Prelude.nix { };
+    };
+  };
+}
+```
+
+## Overrides {#ssec-dhall-overrides}
+
+You can override any of the arguments to `buildDhallGitHubPackage` or
+`buildDhallDirectoryPackage` using the `overridePackage` attribute of a package.
+For example, suppose we wanted to selectively enable `source = true` just for the Prelude.  We can do that like this:
+
+```nix
+{
+  dhallOverrides = self: super: {
+    Prelude = super.Prelude.overridePackage { source = true; };
+
+    # ...
+  };
+}
+```
+
+[semantic-integrity-checks]: https://docs.dhall-lang.org/tutorials/Language-Tour.html#installing-packages
diff --git a/nixpkgs/doc/languages-frameworks/dotnet.section.md b/nixpkgs/doc/languages-frameworks/dotnet.section.md
new file mode 100644
index 000000000000..a4e9d6cf9a6c
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/dotnet.section.md
@@ -0,0 +1,260 @@
+# Dotnet {#dotnet}
+
+## Local Development Workflow {#local-development-workflow}
+
+For local development, it's recommended to use nix-shell to create a dotnet environment:
+
+```nix
+# shell.nix
+with import <nixpkgs> {};
+
+mkShell {
+  name = "dotnet-env";
+  packages = [
+    dotnet-sdk
+  ];
+}
+```
+
+### Using many sdks in a workflow {#using-many-sdks-in-a-workflow}
+
+It's very likely that more than one sdk will be needed on a given project. Dotnet provides several different frameworks (e.g. dotnetcore, aspnetcore, etc.) as well as many versions for a given framework. Normally, dotnet is able to fetch a framework and install it relative to the executable. However, this would mean writing to the nix store in nixpkgs, which is read-only. To support the many-sdk use case, one can compose an environment using `dotnetCorePackages.combinePackages`:
+
+```nix
+with import <nixpkgs> {};
+
+mkShell {
+  name = "dotnet-env";
+  packages = [
+    (with dotnetCorePackages; combinePackages [
+      sdk_6_0
+      sdk_7_0
+    ])
+  ];
+}
+```
+
+This will produce a dotnet installation that has both the dotnet 6.0 and 7.0 sdks. The first sdk listed will have its cli utility present in the resulting environment. Example info output:
+
+```ShellSession
+$ dotnet --info
+.NET SDK:
+ Version:   7.0.202
+ Commit:    6c74320bc3
+
+Runtime Environment:
+ OS Name:     nixos
+ OS Version:  23.05
+ OS Platform: Linux
+ RID:         linux-x64
+ Base Path:   /nix/store/n2pm44xq20hz7ybsasgmd7p3yh31gnh4-dotnet-sdk-7.0.202/sdk/7.0.202/
+
+Host:
+  Version:      7.0.4
+  Architecture: x64
+  Commit:       0a396acafe
+
+.NET SDKs installed:
+  6.0.407 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/sdk]
+  7.0.202 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/sdk]
+
+.NET runtimes installed:
+  Microsoft.AspNetCore.App 6.0.15 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
+  Microsoft.AspNetCore.App 7.0.4 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.AspNetCore.App]
+  Microsoft.NETCore.App 6.0.15 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.NETCore.App]
+  Microsoft.NETCore.App 7.0.4 [/nix/store/3b19303vwrhv0xxz1hg355c7f2hgxxgd-dotnet-core-combined/shared/Microsoft.NETCore.App]
+
+Other architectures found:
+  None
+
+Environment variables:
+  Not set
+
+global.json file:
+  Not found
+
+Learn more:
+  https://aka.ms/dotnet/info
+
+Download .NET:
+  https://aka.ms/dotnet/download
+```
+
+## dotnet-sdk vs dotnetCorePackages.sdk {#dotnet-sdk-vs-dotnetcorepackages.sdk}
+
+The `dotnetCorePackages.sdk_X_Y` is preferred over the old dotnet-sdk as both major and minor version are very important for a dotnet environment. If a given minor version isn't present (or was changed), then this will likely break your ability to build a project.
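+
+For example, to pin a specific SDK in the `shell.nix` shown earlier:
+
+```nix
+# shell.nix
+with import <nixpkgs> {};
+
+mkShell {
+  name = "dotnet-env";
+  packages = [
+    dotnetCorePackages.sdk_6_0
+  ];
+}
+```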
+
+## dotnetCorePackages.sdk vs dotnetCorePackages.runtime vs dotnetCorePackages.aspnetcore {#dotnetcorepackages.sdk-vs-dotnetcorepackages.runtime-vs-dotnetcorepackages.aspnetcore}
+
+The `dotnetCorePackages.sdk` contains both a runtime and the full sdk of a given version. The `runtime` and `aspnetcore` packages are meant to serve as minimal runtimes to deploy alongside already built applications.
+
+## Packaging a Dotnet Application {#packaging-a-dotnet-application}
+
+To package Dotnet applications, you can use `buildDotnetModule`. This has similar arguments to `stdenv.mkDerivation`, with the following additions:
+
+* `projectFile` is used for specifying the dotnet project file, relative to the source root. These have `.sln` (entire solution) or `.csproj` (single project) file extensions. This can be a list of multiple projects as well. When omitted, the build will attempt to find and build the solution (`.sln`). If you run into problems, make sure to set it to a file (or a list of files) with the `.csproj` extension - building applications as entire solutions is not fully supported by the .NET CLI.
+* `nugetDeps` takes either a path to a `deps.nix` file, or a derivation. The `deps.nix` file can be generated using the script attached to `passthru.fetch-deps`. If the argument is a derivation, it will be used directly and is assumed to have the same output as `mkNugetDeps`.
+::: {.note}
+For more detail about managing the `deps.nix` file, see [Generating and updating NuGet dependencies](#generating-and-updating-nuget-dependencies)
+:::
+
+* `packNupkg` is used to pack the project as a `nupkg`, and installs it to `$out/share`. If set to `true`, the derivation can be used as a dependency for another dotnet project by adding it to `projectReferences`.
+* `projectReferences` can be used to resolve `ProjectReference` project items. Referenced projects can be packed with `buildDotnetModule` by setting the `packNupkg = true` attribute and passing a list of derivations to `projectReferences`. Since we are sharing referenced projects as NuGet packages, they must be added to csproj/fsproj files as `PackageReference` as well.
+ For example, your project has a local dependency:
+ ```xml
+     <ProjectReference Include="../foo/bar.fsproj" />
+ ```
+ To enable discovery through `projectReferences` you would need to add:
+ ```xml
+     <ProjectReference Include="../foo/bar.fsproj" />
+     <PackageReference Include="bar" Version="*" Condition=" '$(ContinuousIntegrationBuild)'=='true' "/>
+ ```
+* `executables` is used to specify which executables get wrapped to `$out/bin`, relative to `$out/lib/$pname`. If this is unset, all executables generated will get installed. If you do not want to install any, set this to `[]`. This gets done in the `preFixup` phase.
+* `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
+* `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
+* `selfContainedBuild` allows enabling the [self-contained](https://docs.microsoft.com/en-us/dotnet/core/deploying/#publish-self-contained) build flag. By default, it is set to false and generated applications have a dependency on the selected dotnet runtime. If enabled, the dotnet runtime is bundled into the executable and the built app has no dependency on .NET.
+* `useAppHost` will enable creation of a binary executable that runs the .NET application using the specified root. More info in [Microsoft docs](https://learn.microsoft.com/en-us/dotnet/core/deploying/#publish-framework-dependent). Enabled by default.
+* `useDotnetFromEnv` will change the binary wrapper so that it uses the .NET from the environment. The runtime specified by `dotnet-runtime` is given as a fallback in case no .NET is installed in the user's environment. This is most useful for .NET global tools and LSP servers, which often extend the .NET CLI and their runtime should match the users' .NET runtime.
+* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used. You can also set this to the result of `dotnetSdkPackages.combinePackages`, if the project uses multiple SDKs to build.
+* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime, or an aspnetcore.
+* `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
+* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. It gets restored and built, but not installed. You may need to regenerate your nuget lockfile after setting this. Note that if set, only tests from this project are executed.
+* `disabledTests` is used to disable running specific unit tests. This gets passed as: `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all unit test frameworks.
+* `dotnetRestoreFlags` can be used to pass flags to `dotnet restore`.
+* `dotnetBuildFlags` can be used to pass flags to `dotnet build`.
+* `dotnetTestFlags` can be used to pass flags to `dotnet test`. Used only if `doCheck` is set to `true`.
+* `dotnetInstallFlags` can be used to pass flags to `dotnet install`.
+* `dotnetPackFlags` can be used to pass flags to `dotnet pack`. Used only if `packNupkg` is set to `true`.
+* `dotnetFlags` can be used to pass flags to all of the above phases.
+
+When packaging a new application, you need to fetch its dependencies. Create an empty `deps.nix`, set `nugetDeps = ./deps.nix`, then run `nix-build -A package.fetch-deps` to generate a script that will build the lockfile for you.
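+
+As a sketch, the commands for a package exposed as the attribute `package` look like this:
+
+```ShellSession
+$ touch deps.nix                   # empty placeholder referenced by nugetDeps
+$ nix-build -A package.fetch-deps  # builds the lockfile generator at ./result
+$ ./result deps.nix                # writes the generated lockfile to deps.nix
+```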
+
+Here is an example `default.nix`, using some of the previously discussed arguments:
+```nix
+{ lib, buildDotnetModule, dotnetCorePackages, ffmpeg }:
+
+let
+  referencedProject = import ../../bar { /* ... */ };
+in buildDotnetModule rec {
+  pname = "someDotnetApplication";
+  version = "0.1";
+
+  src = ./.;
+
+  projectFile = "src/project.sln";
+  # File generated with `nix-build -A package.passthru.fetch-deps`.
+  # To run fetch-deps when this file does not yet exist, set nugetDeps to null
+  nugetDeps = ./deps.nix;
+
+  projectReferences = [ referencedProject ]; # `referencedProject` must contain `nupkg` in the folder structure.
+
+  dotnet-sdk = dotnetCorePackages.sdk_6_0;
+  dotnet-runtime = dotnetCorePackages.runtime_6_0;
+
+  executables = [ "foo" ]; # This wraps "$out/lib/$pname/foo" to `$out/bin/foo`.
+  executables = []; # Don't install any executables.
+
+  packNupkg = true; # This packs the project as "foo-0.1.nupkg" at `$out/share`.
+
+  runtimeDeps = [ ffmpeg ]; # This will wrap ffmpeg's library path into `LD_LIBRARY_PATH`.
+}
+```
+
+Keep in mind that you can tag the [`@NixOS/dotnet`](https://github.com/orgs/nixos/teams/dotnet) team for help and code review.
+
+## Dotnet global tools {#dotnet-global-tools}
+
+[.NET Global tools](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools) are a mechanism provided by the dotnet CLI to install .NET binaries from Nuget packages.
+
+They can be installed either as a global tool for the entire system, or as a local tool specific to a project.
+
+The local installation is the easiest and works on NixOS in the same way as on other Linux distributions.
+[See dotnet documentation](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-local-tool) to learn more.
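+
+For reference, a typical local tool installation inside a project directory looks
+like this (using the upstream example tool `dotnetsay`):
+
+```ShellSession
+$ dotnet new tool-manifest        # creates .config/dotnet-tools.json
+$ dotnet tool install dotnetsay   # records the tool in the manifest and installs it
+$ dotnet tool run dotnetsay       # runs the tool through the manifest
+```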
+
+[The global installation method](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-global-tool)
+should also work most of the time. You have to remember to update the `PATH`
+value to the location the tools are installed to (the CLI will inform you about it during installation) and also set
+the `DOTNET_ROOT` value, so that the tool can find the .NET SDK package.
+You can find the path to the SDK by running `nix eval --raw nixpkgs#dotnet-sdk` (substitute the `dotnet-sdk` package for
+another if a different SDK version is needed).
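+
+Putting that together, a sketch of the global installation method could look like
+this (`dotnetsay` is just an example tool; `~/.dotnet/tools` is the default
+install location reported by the CLI):
+
+```ShellSession
+$ export DOTNET_ROOT="$(nix eval --raw nixpkgs#dotnet-sdk)"
+$ export PATH="$HOME/.dotnet/tools:$PATH"
+$ dotnet tool install --global dotnetsay
+$ dotnetsay
+```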
+
+This method is not recommended on NixOS, since it's not declarative and involves installing binaries not made for NixOS,
+which will not always work.
+
+The third, and preferred way, is packaging the tool into a Nix derivation.
+
+### Packaging Dotnet global tools {#packaging-dotnet-global-tools}
+
+Dotnet global tools are standard .NET binaries, just made available through a special
+NuGet package. Therefore, they can be built and packaged like any other .NET application,
+using `buildDotnetModule`.
+
+If, however, the source is not available or is difficult to build, the
+`buildDotnetGlobalTool` helper can be used, which will package the tool
+straight from its NuGet package.
+
+This helper has the same arguments as `buildDotnetModule`, with a few differences:
+
+* `pname` and `version` are required, and will be used to find the NuGet package of the tool
+* `nugetName` can be used to override the NuGet package name that will be downloaded, if it's different from `pname`
+* `nugetSha256` is the hash of the fetched NuGet package. Set this to `lib.fakeSha256` for the first build, and it will error out, giving you the proper hash. Also remember to update it during version updates (it will not error out if you just change the version while having a fetched package in `/nix/store`)
+* `dotnet-runtime` is set to `dotnet-sdk` by default. When changing this, remember that .NET tools fetched from NuGet require an SDK.
+
+Here is an example of packaging `pbm`, an unfree binary without source available:
+```nix
+{ buildDotnetGlobalTool, lib }:
+
+buildDotnetGlobalTool {
+  pname = "pbm";
+  version = "1.3.1";
+
+  nugetSha256 = "sha256-ZG2HFyKYhVNVYd2kRlkbAjZJq88OADe3yjxmLuxXDUo=";
+
+  meta = {
+    homepage = "https://cmd.petabridge.com/index.html";
+    changelog = "https://cmd.petabridge.com/articles/RELEASE_NOTES.html";
+    license = lib.licenses.unfree;
+    platforms = lib.platforms.linux;
+  };
+}
+```
+
+## Generating and updating NuGet dependencies {#generating-and-updating-nuget-dependencies}
+
+First, make sure you have cloned the upstream repository and that you are inside
+it. Then restore the packages to the `out` directory:
+
+```bash
+$ dotnet restore --packages out
+  Determining projects to restore...
+  Restored /home/lychee/Celeste64/Celeste64.csproj (in 1.21 sec).
+```
+
+Next, use the `nuget-to-nix` tool provided in nixpkgs to generate a lockfile (`deps.nix`) from
+the packages inside the `out` directory.
+
+```bash
+$ nuget-to-nix out > deps.nix
+```
+
+`nuget-to-nix` will generate output similar to the following:
+```nix
+{ fetchNuGet }: [
+  (fetchNuGet { pname = "FosterFramework"; version = "0.1.15-alpha"; sha256 = "0pzsdfbsfx28xfqljcwy100xhbs6wyx0z1d5qxgmv3l60di9xkll"; })
+  (fetchNuGet { pname = "Microsoft.AspNetCore.App.Runtime.linux-x64"; version = "8.0.1"; sha256 = "1gjz379y61ag9whi78qxx09bwkwcznkx2mzypgycibxk61g11da1"; })
+  (fetchNuGet { pname = "Microsoft.NET.ILLink.Tasks"; version = "8.0.1"; sha256 = "1drbgqdcvbpisjn8mqfgba1pwb6yri80qc4mfvyczqwrcsj5k2ja"; })
+  (fetchNuGet { pname = "Microsoft.NETCore.App.Runtime.linux-x64"; version = "8.0.1"; sha256 = "1g5b30f4l8a1zjjr3b8pk9mcqxkxqwa86362f84646xaj4iw3a4d"; })
+  (fetchNuGet { pname = "SharpGLTF.Core"; version = "1.0.0-alpha0031"; sha256 = "0ln78mkhbcxqvwnf944hbgg24vbsva2jpih6q3x82d3h7rl1pkh6"; })
+  (fetchNuGet { pname = "SharpGLTF.Runtime"; version = "1.0.0-alpha0031"; sha256 = "0lvb3asi3v0n718qf9y367km7qpkb9wci38y880nqvifpzllw0jg"; })
+  (fetchNuGet { pname = "Sledge.Formats"; version = "1.2.2"; sha256 = "1y0l66m9rym0p1y4ifjlmg3j9lsmhkvbh38frh40rpvf1axn2dyh"; })
+  (fetchNuGet { pname = "Sledge.Formats.Map"; version = "1.1.5"; sha256 = "1bww60hv9xcyxpvkzz5q3ybafdxxkw6knhv97phvpkw84pd0jil6"; })
+  (fetchNuGet { pname = "System.Numerics.Vectors"; version = "4.5.0"; sha256 = "1kzrj37yzawf1b19jq0253rcs8hsq1l2q8g69d7ipnhzb0h97m59"; })
+]
+```
+
+Finally, you move the `deps.nix` file to the appropriate location to be used by `nugetDeps`, then you're all set!
+
+If you ever need to update the dependencies of a package, you instead do the following:
+
+* `nix-build -A package.fetch-deps` to generate the update script for `package`
+* Run `./result deps.nix` to regenerate the lockfile as `deps.nix`; keep in mind that if no location is provided, it will write to a temporary path instead
+* Finally, move the file where needed and look at its contents to confirm it has updated the dependencies.
+
diff --git a/nixpkgs/doc/languages-frameworks/emscripten.section.md b/nixpkgs/doc/languages-frameworks/emscripten.section.md
new file mode 100644
index 000000000000..9ce48db2c2de
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/emscripten.section.md
@@ -0,0 +1,167 @@
+# Emscripten {#emscripten}
+
+[Emscripten](https://github.com/kripken/emscripten): An LLVM-to-JavaScript Compiler
+
+If you want to work with `emcc`, `emconfigure` and `emmake` as you are used to from Ubuntu and similar distributions, run:
+
+```console
+nix-shell -p emscripten
+```
+
+A few things to note:
+
+* `export EMCC_DEBUG=2` is nice for debugging
+* The build artifact cache in `~/.emscripten` sometimes creates issues and needs to be removed from time to time
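+
+Equivalently, a minimal declarative `shell.nix` for local development could look
+like this sketch (`nodejs` is included only as an example, for running the
+generated JavaScript):
+
+```nix
+# shell.nix
+with import <nixpkgs> {};
+
+mkShell {
+  packages = [
+    emscripten
+    nodejs
+  ];
+}
+```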
+
+## Examples {#declarative-usage}
+
+Let's see two different examples from `pkgs/top-level/emscripten-packages.nix`:
+
+* `pkgs.zlib.override`
+* `pkgs.buildEmscriptenPackage`
+
+A special requirement of `pkgs.buildEmscriptenPackage` is `doCheck = true`.
+This means each Emscripten package requires that a [`checkPhase`](#ssec-check-phase) is implemented.
+
+* Use `export EMCC_DEBUG=2` from within a phase to get more detailed debug output on what is going wrong.
+* The cache at `~/.emscripten` requires setting `HOME=$TMPDIR` in individual phases.
+  This makes compilation slower but also more deterministic.
+
+::: {.example #usage-1-pkgs.zlib.override}
+
+# Using `pkgs.zlib.override {}`
+
+This example uses `zlib` from Nixpkgs, but instead of compiling **C** to **ELF** it compiles **C** to **JavaScript**, since we are using `pkgs.zlib.override` and have changed `stdenv` to `pkgs.emscriptenStdenv`.
+
+A few adaptations and hacks were put in place to make it work.
+One advantage is that when `pkgs.zlib` is updated, it will automatically update this package as well.
+
+
+```nix
+(pkgs.zlib.override {
+  stdenv = pkgs.emscriptenStdenv;
+}).overrideAttrs
+(old: rec {
+  buildInputs = old.buildInputs ++ [ pkg-config ];
+  # we need to reset this setting!
+  env = (old.env or { }) // { NIX_CFLAGS_COMPILE = ""; };
+  configurePhase = ''
+    # FIXME: Some tests require writing at $HOME
+    HOME=$TMPDIR
+    runHook preConfigure
+
+    #export EMCC_DEBUG=2
+    emconfigure ./configure --prefix=$out --shared
+
+    runHook postConfigure
+  '';
+  dontStrip = true;
+  outputs = [ "out" ];
+  buildPhase = ''
+    emmake make
+  '';
+  installPhase = ''
+    emmake make install
+  '';
+  checkPhase = ''
+    echo "================= testing zlib using node ================="
+
+    echo "Compiling a custom test"
+    set -x
+    emcc -O2 -s EMULATE_FUNCTION_POINTER_CASTS=1 test/example.c -DZ_SOLO \
+    libz.so.${old.version} -I . -o example.js
+
+    echo "Using node to execute the test"
+    ${pkgs.nodejs}/bin/node ./example.js
+
+    set +x
+    if [ $? -ne 0 ]; then
+      echo "test failed for some reason"
+      exit 1;
+    else
+      echo "it seems to work! very good."
+    fi
+    echo "================= /testing zlib using node ================="
+  '';
+
+  postPatch = pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
+    substituteInPlace configure \
+      --replace-fail '/usr/bin/libtool' 'ar' \
+      --replace-fail 'AR="libtool"' 'AR="ar"' \
+      --replace-fail 'ARFLAGS="-o"' 'ARFLAGS="-r"'
+  '';
+})
+```
+
+:::{.example #usage-2-pkgs.buildemscriptenpackage}
+
+# Using `pkgs.buildEmscriptenPackage {}`
+
+This `xmlmirror` example features an Emscripten package that is defined completely from this context and no `pkgs.zlib.override` is used.
+
+```nix
+pkgs.buildEmscriptenPackage rec {
+  name = "xmlmirror";
+
+  buildInputs = [ pkg-config autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
+  nativeBuildInputs = [ pkg-config zlib ];
+
+  src = pkgs.fetchgit {
+    url = "https://gitlab.com/odfplugfest/xmlmirror.git";
+    rev = "4fd7e86f7c9526b8f4c1733e5c8b45175860a8fd";
+    hash = "sha256-i+QgY+5PYVg5pwhzcDnkfXAznBg3e8sWH2jZtixuWsk=";
+  };
+
+  configurePhase = ''
+    rm -f fastXmlLint.js*
+    # a fix for ERROR:root:For asm.js, TOTAL_MEMORY must be a multiple of 16MB, was 234217728
+    # https://gitlab.com/odfplugfest/xmlmirror/issues/8
+    sed -e "s/TOTAL_MEMORY=234217728/TOTAL_MEMORY=268435456/g" -i Makefile.emEnv
+    # https://github.com/kripken/emscripten/issues/6344
+    # https://gitlab.com/odfplugfest/xmlmirror/issues/9
+    sed -e "s/\$(JSONC_LDFLAGS) \$(ZLIB_LDFLAGS) \$(LIBXML20_LDFLAGS)/\$(JSONC_LDFLAGS) \$(LIBXML20_LDFLAGS) \$(ZLIB_LDFLAGS) /g" -i Makefile.emEnv
+    # https://gitlab.com/odfplugfest/xmlmirror/issues/11
+    sed -e "s/-o fastXmlLint.js/-s EXTRA_EXPORTED_RUNTIME_METHODS='[\"ccall\", \"cwrap\"]' -o fastXmlLint.js/g" -i Makefile.emEnv
+  '';
+
+  buildPhase = ''
+    HOME=$TMPDIR
+    make -f Makefile.emEnv
+  '';
+
+  outputs = [ "out" "doc" ];
+
+  installPhase = ''
+    mkdir -p $out/share
+    mkdir -p $doc/share/${name}
+
+    cp Demo* $out/share
+    cp -R codemirror-5.12 $out/share
+    cp fastXmlLint.js* $out/share
+    cp *.xsd $out/share
+    cp *.js $out/share
+    cp *.xhtml $out/share
+    cp *.html $out/share
+    cp *.json $out/share
+    cp *.rng $out/share
+    cp README.md $doc/share/${name}
+  '';
+  checkPhase = ''
+
+  '';
+}
+```
+
+:::
+
+## Debugging {#declarative-debugging}
+
+Use `nix-shell -I nixpkgs=/some/dir/nixpkgs -A emscriptenPackages.libz` and from there you can go through the individual steps. This makes it easy to build a good unit test or list the files of the project.
+
+1. `nix-shell -I nixpkgs=/some/dir/nixpkgs -A emscriptenPackages.libz`
+2. `cd /tmp/`
+3. `unpackPhase`
+4. `cd libz-1.2.3`
+5. `configurePhase`
+6. `buildPhase`
+7. ... happy hacking...
diff --git a/nixpkgs/doc/languages-frameworks/gnome.section.md b/nixpkgs/doc/languages-frameworks/gnome.section.md
new file mode 100644
index 000000000000..6bf867b21abe
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/gnome.section.md
@@ -0,0 +1,220 @@
+# GNOME {#sec-language-gnome}
+
+## Packaging GNOME applications {#ssec-gnome-packaging}
+
+Programs in the GNOME universe are written in various languages but they all use GObject-based libraries like GLib, GTK or GStreamer. These libraries are often modular, relying on looking into certain directories to find their modules. However, due to Nix’s specific file system organization, this will fail without our intervention. Fortunately, the libraries usually allow overriding the directories through environment variables, either natively or thanks to a patch in nixpkgs. [Wrapping](#fun-wrapProgram) the executables to ensure correct paths are available to the application constitutes a significant part of packaging a modern desktop application. In this section, we will describe various modules needed by such applications, environment variables needed to make the modules load, and finally a script that will do the work for us.
+
+### Settings {#ssec-gnome-settings}
+
+The [GSettings](https://developer.gnome.org/gio/stable/GSettings.html) API is often used for storing settings. GSettings schemas are required to know the type and other metadata of the stored values. GLib looks for `glib-2.0/schemas/gschemas.compiled` files inside the directories of `XDG_DATA_DIRS`.
+
+On Linux, the GSettings API is implemented using the [dconf](https://wiki.gnome.org/Projects/dconf) backend. You will need to add the `dconf` [GIO module](#ssec-gnome-gio-modules) to the `GIO_EXTRA_MODULES` variable, otherwise the `memory` backend will be used and the saved settings will not be persistent.
+
+Lastly, you will need the dconf database D-Bus service itself. You can enable it using `programs.dconf.enable`.
+
+Some applications will also require `gsettings-desktop-schemas` for things like reading proxy configuration or user interface customization. This dependency is often not mentioned by upstream; you should grep for `org.gnome.desktop` and `org.gnome.system` to see if the schemas are needed.
+
+### GIO modules {#ssec-gnome-gio-modules}
+
+GLib’s [GIO](https://developer.gnome.org/gio/stable/ch01.html) library supports several [extension points](https://developer.gnome.org/gio/stable/extending-gio.html). Notably, they allow:
+
+* implementing settings backends (already [mentioned](#ssec-gnome-settings))
+* adding TLS support
+* proxy settings
+* virtual file systems
+
+The modules are typically installed to the `lib/gio/modules/` directory of a package and you need to add them to `GIO_EXTRA_MODULES` if you need any of those features.
+
+In particular, we recommend:
+
+* adding `dconf.lib` for any software on Linux that reads [GSettings](#ssec-gnome-settings) (even transitively through e.g. GTK’s file manager)
+* adding `glib-networking` for any software that accesses network using GIO or libsoup – glib-networking contains a module that implements TLS support and loads system-wide proxy settings
+
+To allow software to use various virtual file systems, the `gvfs` package can also be added. But that is usually an optional feature, so we typically use `gvfs` from the system (e.g. installed globally using the NixOS module).
+
+### GdkPixbuf loaders {#ssec-gnome-gdk-pixbuf-loaders}
+
+GTK applications typically use [GdkPixbuf](https://gitlab.gnome.org/GNOME/gdk-pixbuf/) to load images. But the `gdk-pixbuf` package only supports basic bitmap formats like JPEG, PNG or TIFF, requiring the use of third-party loader modules for other formats. This is especially painful since GTK itself includes SVG icons, which cannot be rendered without a loader provided by `librsvg`.
+
+Unlike other libraries mentioned in this section, GdkPixbuf only supports a single value in its controlling environment variable `GDK_PIXBUF_MODULE_FILE`. It is supposed to point to a cache file containing information about the available loaders. Each loader package will contain a `lib/gdk-pixbuf-2.0/2.10.0/loaders.cache` file describing the default loaders in `gdk-pixbuf` package plus the loader contained in the package itself. If you want to use multiple third-party loaders, you will need to create your own cache file manually. Fortunately, this is pretty rare as [not many loaders exist](https://gitlab.gnome.org/federico/gdk-pixbuf-survey/blob/master/src/modules.md).
+
+`gdk-pixbuf` contains [a setup hook](#ssec-gnome-hooks-gdk-pixbuf) that sets `GDK_PIXBUF_MODULE_FILE` from dependencies but, as mentioned in a later section, it is pretty limited. Loaders should propagate this setup hook.
+
+### Icons {#ssec-gnome-icons}
+
+When an application uses icons, an icon theme should be available in `XDG_DATA_DIRS` during runtime. The package for the default, icon-less [hicolor-icon-theme](https://www.freedesktop.org/wiki/Software/icon-theme/) (which should be propagated by every icon theme) contains [a setup hook](#ssec-gnome-hooks-hicolor-icon-theme) that will pick up icon themes from `buildInputs` and add their datadirs to the `XDG_ICON_DIRS` environment variable (this is Nixpkgs specific, not actually an XDG standard variable). Unfortunately, relying on that would mean every user has to download the theme included in the package expression no matter their preference. For that reason, we leave the installation of an icon theme to the user. If you use one of the desktop environments, you probably already have an icon theme installed.
+
+In the rare case you need to use icons from dependencies (e.g. when an app forces an icon theme), you can use the following to pick them up:
+
+```nix
+{
+  buildInputs = [
+    pantheon.elementary-icon-theme
+  ];
+  preFixup = ''
+    gappsWrapperArgs+=(
+      # The icon theme is hardcoded.
+      --prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS"
+    )
+  '';
+}
+```
+
+To avoid costly file system access when locating icons, GTK, [as well as Qt](https://woboq.com/blog/qicon-reads-gtk-icon-cache-in-qt57.html), can rely on `icon-theme.cache` files from the themes' top-level directories. These files are generated using `gtk-update-icon-cache`, which is expected to be run whenever an icon is added to or removed from an icon theme (typically an application icon into the `hicolor` theme) and some programs do indeed run this after icon installation. However, since packages are installed into their own prefix by Nix, this would lead to conflicts. For that reason, `gtk3` provides a [setup hook](#ssec-gnome-hooks-gtk-drop-icon-theme-cache) that will remove the file from the installation. Since most applications only ship their own icon that will be loaded on start-up, it should not affect them too much. On the other hand, icon themes are much larger and more widely used, so we need to cache them. Because we recommend installing icon themes globally, we will generate the cache files from all packages in a profile using a NixOS module. You can enable the cache generation using the `gtk.iconCache.enable` option if your desktop environment does not already do that.
+
+### Packaging icon themes {#ssec-icon-theme-packaging}
+
+Icon themes may inherit from other icon themes. The inheritance is specified using the `Inherits` key in the `index.theme` file distributed with the icon theme. According to the [icon theme specification](https://specifications.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html), icons not provided by the theme are looked for in its parent icon themes. Therefore the parent themes should be installed as dependencies for a more complete experience regarding the icon sets used.
+
+The package `hicolor-icon-theme` provides a setup hook which makes symbolic links for the parent themes into the directory `share/icons` of the current theme directory in the nix store, making sure they can be found at runtime. For that to work the packages providing parent icon themes should be listed as propagated build dependencies, together with `hicolor-icon-theme`.
+
+Also make sure that `icon-theme.cache` is installed for each theme provided by the package, and set `dontDropIconThemeCache` to `true` so that the cache file is not removed by the `gtk3` setup hook.
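+
+Putting this together, a sketch of an icon theme expression could look like this
+(the package name, source, and theme directories are illustrative):
+
+```nix
+{ stdenv, fetchurl, gtk3, hicolor-icon-theme }:
+
+stdenv.mkDerivation {
+  pname = "example-icon-theme";
+  version = "1.0";
+
+  src = fetchurl { /* ... */ };
+
+  nativeBuildInputs = [ gtk3 ];  # provides gtk-update-icon-cache
+  propagatedBuildInputs = [ hicolor-icon-theme ];
+
+  # Keep the icon-theme.cache files generated below.
+  dontDropIconThemeCache = true;
+
+  postInstall = ''
+    for theme in "$out"/share/icons/*/; do
+      gtk-update-icon-cache "$theme"
+    done
+  '';
+}
+```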
+
+### GTK Themes {#ssec-gnome-themes}
+
+Previously, a GTK theme needed to be in `XDG_DATA_DIRS`. This is no longer necessary for most programs since GTK incorporated the Adwaita theme. Some programs (for example, those designed for the [elementary HIG](https://docs.elementary.io/hig)) might require a special theme like `pantheon.elementary-gtk-theme`.
+
+### GObject introspection typelibs {#ssec-gnome-typelibs}
+
+[GObject introspection](https://wiki.gnome.org/Projects/GObjectIntrospection) allows applications to use C libraries in other languages easily. It does this through `typelib` files searched in `GI_TYPELIB_PATH`.
+
+### Various plug-ins {#ssec-gnome-plugins}
+
+If your application uses [GStreamer](https://gstreamer.freedesktop.org/) or [Grilo](https://wiki.gnome.org/Projects/Grilo), you should set `GST_PLUGIN_SYSTEM_PATH_1_0` and `GRL_PLUGIN_PATH`, respectively.
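+
+The `wrapGAppsHook` described in the next section picks these variables up
+automatically. If you are wrapping manually instead, a sketch could look like
+this (the binary name is illustrative and `makeWrapper` is assumed to be in
+`nativeBuildInputs`):
+
+```nix
+{
+  buildInputs = [ gst_all_1.gstreamer gst_all_1.gst-plugins-base ];
+
+  preFixup = ''
+    wrapProgram "$out/bin/example-app" \
+      --prefix GST_PLUGIN_SYSTEM_PATH_1_0 : "$GST_PLUGIN_SYSTEM_PATH_1_0"
+  '';
+}
+```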
+
+## Onto `wrapGAppsHook` {#ssec-gnome-hooks}
+
+Given the requirements above, the package expression would become messy quickly:
+
+```nix
+{
+  preFixup = ''
+    for f in $(find $out/bin/ $out/libexec/ -type f -executable); do
+      wrapProgram "$f" \
+        --prefix GIO_EXTRA_MODULES : "${getLib dconf}/lib/gio/modules" \
+        --prefix XDG_DATA_DIRS : "$out/share" \
+        --prefix XDG_DATA_DIRS : "$out/share/gsettings-schemas/${name}" \
+        --prefix XDG_DATA_DIRS : "${gsettings-desktop-schemas}/share/gsettings-schemas/${gsettings-desktop-schemas.name}" \
+        --prefix XDG_DATA_DIRS : "${hicolor-icon-theme}/share" \
+        --prefix GI_TYPELIB_PATH : "${lib.makeSearchPath "lib/girepository-1.0" [ pango json-glib ]}"
+    done
+  '';
+}
+```
+
+Fortunately, there is [`wrapGAppsHook`]{#ssec-gnome-hooks-wrapgappshook}. It works in conjunction with other setup hooks that populate environment variables, and it will then wrap all executables in `bin` and `libexec` directories using said variables.
+
+For convenience, it also adds `dconf.lib` for a GIO module implementing a GSettings backend using `dconf`, `gtk3` for GSettings schemas, and `librsvg` for GdkPixbuf loader to the closure. There is also [`wrapGAppsHook4`]{#ssec-gnome-hooks-wrapgappshook4}, which replaces GTK 3 with GTK 4. And in case you are packaging a program without a graphical interface, you might want to use [`wrapGAppsNoGuiHook`]{#ssec-gnome-hooks-wrapgappsnoguihook}, which runs the same script as `wrapGAppsHook` but does not bring `gtk3` and `librsvg` into the closure.
+
+- `wrapGAppsHook` itself will add the package’s `share` directory to `XDG_DATA_DIRS`.
+
+- []{#ssec-gnome-hooks-glib} `glib` setup hook will populate `GSETTINGS_SCHEMAS_PATH` and then `wrapGAppsHook` will prepend it to `XDG_DATA_DIRS`.
+
+- []{#ssec-gnome-hooks-gdk-pixbuf} `gdk-pixbuf` setup hook will populate `GDK_PIXBUF_MODULE_FILE` with the path to the biggest `loaders.cache` file from the dependencies containing [GdkPixbuf loaders](#ssec-gnome-gdk-pixbuf-loaders). This works fine when there are only two packages containing loaders (`gdk-pixbuf` and e.g. `librsvg`) – it will choose the second one, reasonably expecting that it will be bigger since it describes an extra loader in addition to the default ones. But when there are more than two loader packages, this logic will break. One possible solution would be constructing a custom cache file for each package containing a program, like the `services/x11/gdk-pixbuf.nix` NixOS module does. `wrapGAppsHook` copies the `GDK_PIXBUF_MODULE_FILE` environment variable into the produced wrapper.
+
+- []{#ssec-gnome-hooks-gtk-drop-icon-theme-cache} One of `gtk3`’s setup hooks will remove `icon-theme.cache` files from package’s icon theme directories to avoid conflicts. Icon theme packages should prevent this with `dontDropIconThemeCache = true;`.
+
+- []{#ssec-gnome-hooks-dconf} `dconf.lib` is a dependency of `wrapGAppsHook`, which then also adds it to the `GIO_EXTRA_MODULES` variable.
+
+- []{#ssec-gnome-hooks-hicolor-icon-theme} `hicolor-icon-theme`’s setup hook will add icon themes to `XDG_ICON_DIRS`.
+
+- []{#ssec-gnome-hooks-gobject-introspection} `gobject-introspection` setup hook populates `GI_TYPELIB_PATH` variable with `lib/girepository-1.0` directories of dependencies, which is then added to wrapper by `wrapGAppsHook`. It also adds `share` directories of dependencies to `XDG_DATA_DIRS`, which is intended to promote GIR files but it also [pollutes the closures](https://github.com/NixOS/nixpkgs/issues/32790) of packages using `wrapGAppsHook`.
+
+- []{#ssec-gnome-hooks-gst-grl-plugins} Setup hooks of `gst_all_1.gstreamer` and `grilo` will populate the `GST_PLUGIN_SYSTEM_PATH_1_0` and `GRL_PLUGIN_PATH` variables, respectively, which will then be added to the wrapper by `wrapGAppsHook`.
+
+You can also pass additional arguments to `makeWrapper` using `gappsWrapperArgs` in `preFixup` hook:
+
+```nix
+{
+  preFixup = ''
+    gappsWrapperArgs+=(
+      # Thumbnailers
+      --prefix XDG_DATA_DIRS : "${gdk-pixbuf}/share"
+      --prefix XDG_DATA_DIRS : "${librsvg}/share"
+      --prefix XDG_DATA_DIRS : "${shared-mime-info}/share"
+    )
+  '';
+}
+```
+
+## Updating GNOME packages {#ssec-gnome-updating}
+
+Most GNOME packages offer an [`updateScript`](#var-passthru-updateScript); it is therefore possible to update to the latest source tarball by running `nix-shell maintainers/scripts/update.nix --argstr package gnome.nautilus` or even en masse with `nix-shell maintainers/scripts/update.nix --argstr path gnome`. Read the package’s `NEWS` file to see what changed.
+
+## Frequently encountered issues {#ssec-gnome-common-issues}
+
+### `GLib-GIO-ERROR **: 06:04:50.903: No GSettings schemas are installed on the system` {#ssec-gnome-common-issues-no-schemas}
+
+There are no schemas available in `XDG_DATA_DIRS`. Temporarily add a random package containing schemas, like `gsettings-desktop-schemas`, to `buildInputs`. The [`glib`](#ssec-gnome-hooks-glib) and [`wrapGAppsHook`](#ssec-gnome-hooks-wrapgappshook) setup hooks will take care of making the schemas available to the application and you will see the actual missing schemas with the [next error](#ssec-gnome-common-issues-missing-schema). Or you can try looking through the source code for the actual schemas used.
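+
+For example, as a temporary diagnostic:
+
+```nix
+{
+  buildInputs = [
+    gsettings-desktop-schemas
+  ];
+}
+```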
+
+### `GLib-GIO-ERROR **: 06:04:50.903: Settings schema ‘org.gnome.foo’ is not installed` {#ssec-gnome-common-issues-missing-schema}
+
+The package is missing some GSettings schemas. You can find out the package containing the schema with `nix-locate org.gnome.foo.gschema.xml` and let the hooks handle the wrapping as [above](#ssec-gnome-common-issues-no-schemas).
+
+### When using `wrapGAppsHook` with special derivers you can end up with double wrapped binaries. {#ssec-gnome-common-issues-double-wrapped}
+
+This is because derivers like `python.pkgs.buildPythonApplication` or `qt5.mkDerivation` have setup hooks automatically added that produce wrappers with `makeWrapper`. The simplest way to work around that is to disable the automatic wrapping done by `wrapGAppsHook` with `dontWrapGApps = true;` and pass the arguments it intended to pass to `makeWrapper` to the other wrapper.
+
+In the case of a Python application it could look like:
+
+```nix
+python3.pkgs.buildPythonApplication {
+  pname = "gnome-music";
+  version = "3.32.2";
+
+  nativeBuildInputs = [
+    wrapGAppsHook
+    gobject-introspection
+    # ...
+  ];
+
+  dontWrapGApps = true;
+
+  # Arguments to be passed to `makeWrapper`, only used by buildPython*
+  preFixup = ''
+    makeWrapperArgs+=("''${gappsWrapperArgs[@]}")
+  '';
+}
+```
+
+And for a Qt app:
+
+```nix
+mkDerivation {
+  pname = "calibre";
+  version = "3.47.0";
+
+  nativeBuildInputs = [
+    wrapGAppsHook
+    qmake
+    # ...
+  ];
+
+  dontWrapGApps = true;
+
+  # Arguments to be passed to `makeWrapper`, only used by qt5’s mkDerivation
+  preFixup = ''
+    qtWrapperArgs+=("''${gappsWrapperArgs[@]}")
+  '';
+}
+```
+
+### I am packaging a project that cannot be wrapped, like a library or GNOME Shell extension. {#ssec-gnome-common-issues-unwrappable-package}
+
+You can rely on applications depending on the library setting the necessary environment variables but that is often easy to miss. Instead we recommend patching the paths in the source code whenever possible. Here are some examples:
+
+- []{#ssec-gnome-common-issues-unwrappable-package-gnome-shell-ext} [Replacing a `GI_TYPELIB_PATH` in GNOME Shell extension](https://github.com/NixOS/nixpkgs/blob/7bb8f05f12ca3cff9da72b56caa2f7472d5732bc/pkgs/desktops/gnome-3/core/gnome-shell-extensions/default.nix#L21-L24) – we are using `substituteAll` to include the path to a typelib into a patch.
+
+- []{#ssec-gnome-common-issues-unwrappable-package-gsettings} The following examples are hardcoding GSettings schema paths. To get the schema paths we use the functions
+
+  * `glib.getSchemaPath` Takes a nix package attribute as an argument.
+
+  * `glib.makeSchemaPath` Takes a package output like `$out` and a derivation name. You should use this if the schemas you need to hardcode are in the same derivation.
+
+  []{#ssec-gnome-common-issues-unwrappable-package-gsettings-vala} [Hard-coding GSettings schema path in Vala plug-in (dynamically loaded library)](https://github.com/NixOS/nixpkgs/blob/7bb8f05f12ca3cff9da72b56caa2f7472d5732bc/pkgs/desktops/pantheon/apps/elementary-files/default.nix#L78-L86) – here, `substituteAll` cannot be used since the schema comes from the same package, preventing us from passing its path to the function, probably due to a [Nix bug](https://github.com/NixOS/nix/issues/1846).
+
+  []{#ssec-gnome-common-issues-unwrappable-package-gsettings-c} [Hard-coding GSettings schema path in C library](https://github.com/NixOS/nixpkgs/blob/29c120c065d03b000224872251bed93932d42412/pkgs/development/libraries/glib-networking/default.nix#L31-L34) – nothing special other than using [Coccinelle patch](https://github.com/NixOS/nixpkgs/pull/67957#issuecomment-527717467) to generate the patch itself.
+
+### I need to wrap a binary outside `bin` and `libexec` directories. {#ssec-gnome-common-issues-weird-location}
+
+You can manually trigger the wrapping with `wrapGApp` in `preFixup` phase. It takes a path to a program as a first argument; the remaining arguments are passed directly to [`wrapProgram`](#fun-wrapProgram) function.
diff --git a/nixpkgs/doc/languages-frameworks/go.section.md b/nixpkgs/doc/languages-frameworks/go.section.md
new file mode 100644
index 000000000000..6db0e73505d2
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/go.section.md
@@ -0,0 +1,288 @@
+# Go {#sec-language-go}
+
+## Building Go modules with `buildGoModule` {#ssec-language-go}
+
+The function `buildGoModule` builds Go programs managed with Go modules. It builds [Go Modules](https://github.com/golang/go/wiki/Modules) through a two-phase build:
+
+- An intermediate fetcher derivation called `goModules`. This derivation will be used to fetch all the dependencies of the Go module.
+- A final derivation will use the output of the intermediate derivation to build the binaries and produce the final output.
+
+### Attributes of `buildGoModule` {#buildgomodule-parameters}
+
+The `buildGoModule` function accepts the following parameters in addition to the [attributes accepted by both Go builders](#ssec-go-common-attributes):
+
+- `vendorHash`: is the hash of the output of the intermediate fetcher derivation (the dependencies of the Go modules).
+
+  `vendorHash` can be set to `null`.
+  In that case, rather than fetching the dependencies, the dependencies already vendored in the `vendor` directory of the source repo will be used.
+
+  To avoid updating this field when dependencies change, run `go mod vendor` in your source repo and set `vendorHash = null;`.
+  You can read more about [vendoring in the Go documentation](https://go.dev/ref/mod#vendoring).
+
+  To obtain the actual hash, set `vendorHash = lib.fakeHash;` and run the build ([more details here](#sec-source-hashes)).
+- `proxyVendor`: If `true`, the intermediate fetcher downloads dependencies from the
+  [Go module proxy](https://go.dev/ref/mod#module-proxy) (using `go mod download`) instead of vendoring them. The resulting
+  [module cache](https://go.dev/ref/mod#module-cache) is then passed to the final derivation.
+
+  This is useful if your code depends on C code and `go mod tidy` does not include the needed sources to build or
+  if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorHash` checksums.
+
+  Defaults to `false`.
+- `modPostBuild`: Shell commands to run after the build of the goModules executes `go mod vendor`, and before calculating the fixed-output derivation's `vendorHash`.
+  Note that if you change this attribute, you need to update the `vendorHash` attribute.
+- `modRoot`: The root directory of the Go module that contains the `go.mod` file.
+  Defaults to `./`, which is the root of `src`.
+
+### Example for `buildGoModule` {#ex-buildGoModule}
+
+The following is an example expression using `buildGoModule`:
+
+```nix
+{
+  pet = buildGoModule rec {
+    pname = "pet";
+    version = "0.3.4";
+
+    src = fetchFromGitHub {
+      owner = "knqyf263";
+      repo = "pet";
+      rev = "v${version}";
+      hash = "sha256-Gjw1dRrgM8D3G7v6WIM2+50r4HmTXvx0Xxme2fH9TlQ=";
+    };
+
+    vendorHash = "sha256-ciBIR+a1oaYH+H1PcC8cD8ncfJczk1IiJ8iYNM+R6aA=";
+
+    meta = {
+      description = "Simple command-line snippet manager, written in Go";
+      homepage = "https://github.com/knqyf263/pet";
+      license = lib.licenses.mit;
+      maintainers = with lib.maintainers; [ kalbasit ];
+    };
+  };
+}
+```
+
+## `buildGoPackage` (legacy) {#ssec-go-legacy}
+
+The function `buildGoPackage` builds legacy Go programs, not supporting Go modules.
+
+### Example for `buildGoPackage` {#example-for-buildgopackage}
+
+The following is an example expression using `buildGoPackage`. In it, these arguments are of special significance to the function:
+
+- `goPackagePath` specifies the package's canonical Go import path.
+- `goDeps` is where the Go dependencies of a Go program are listed as a list of package source identified by Go import path. It could be imported as a separate `deps.nix` file for readability. The dependency data structure is described below.
+
+```nix
+{
+  deis = buildGoPackage rec {
+    pname = "deis";
+    version = "1.13.0";
+
+    goPackagePath = "github.com/deis/deis";
+
+    src = fetchFromGitHub {
+      owner = "deis";
+      repo = "deis";
+      rev = "v${version}";
+      hash = "sha256-XCPD4LNWtAd8uz7zyCLRfT8rzxycIUmTACjU03GnaeM=";
+    };
+
+    goDeps = ./deps.nix;
+  };
+}
+```
+
+The `goDeps` attribute can be imported from a separate `nix` file that defines which Go libraries are needed and should be included in `GOPATH` for `buildPhase`:
+
+```nix
+# deps.nix
+[ # goDeps is a list of Go dependencies.
+  {
+    # goPackagePath specifies Go package import path.
+    goPackagePath = "gopkg.in/yaml.v2";
+    fetch = {
+      # `fetch type` that needs to be used to get package source.
+      # If `git` is used there should be `url`, `rev` and `hash` defined next to it.
+      type = "git";
+      url = "https://gopkg.in/yaml.v2";
+      rev = "a83829b6f1293c91addabc89d0571c246397bbf4";
+      hash = "sha256-EMrdy0M0tNuOcITaTAmT5/dPSKPXwHDKCXFpkGbVjdQ=";
+    };
+  }
+  {
+    goPackagePath = "github.com/docopt/docopt-go";
+    fetch = {
+      type = "git";
+      url = "https://github.com/docopt/docopt-go";
+      rev = "784ddc588536785e7299f7272f39101f7faccc3f";
+      hash = "sha256-Uo89zjE+v3R7zzOq/gbQOHj3SMYt2W1nDHS7RCUin3M=";
+    };
+  }
+]
+```
+
+To extract dependency information from a Go package in an automated way, use [go2nix (deprecated)](https://github.com/kamilchm/go2nix). It can produce a complete derivation and `goDeps` file for Go programs.
+
+You may use Go packages installed into the active Nix profiles by adding the following to your `~/.bashrc`:
+
+```bash
+for p in $NIX_PROFILES; do
+    GOPATH="$p/share/go:$GOPATH"
+done
+```
+
+## Attributes used by both builders {#ssec-go-common-attributes}
+
+Many attributes [controlling the build phase](#variables-controlling-the-build-phase) are respected by both `buildGoModule` and `buildGoPackage`. Note that `buildGoModule` reads the following attributes when building the `vendor/` goModules fixed-output derivation as well:
+
+- [`sourceRoot`](#var-stdenv-sourceRoot)
+- [`prePatch`](#var-stdenv-prePatch)
+- [`patches`](#var-stdenv-patches)
+- [`patchFlags`](#var-stdenv-patchFlags)
+- [`postPatch`](#var-stdenv-postPatch)
+- [`preBuild`](#var-stdenv-preBuild)
+
+To control test execution of the build derivation, the following attributes are of interest:
+
+- [`checkInputs`](#var-stdenv-checkInputs)
+- [`preCheck`](#var-stdenv-preCheck)
+- [`checkFlags`](#var-stdenv-checkFlags)
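+
+For example (a sketch; the `git` test dependency and the writable `HOME` are hypothetical requirements of the test suite):
+
+```nix
+{
+  # Tools only needed while running the tests.
+  checkInputs = [ git ];
+  # Assume the test suite needs a writable home directory.
+  preCheck = ''
+    export HOME=$(mktemp -d)
+  '';
+}
+```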
+
+In addition to the above attributes, and the many more variables also respected by `stdenv.mkDerivation`, both `buildGoModule` and `buildGoPackage` respect Go-specific attributes that tweak their behavior slightly:
+
+### `ldflags` {#var-go-ldflags}
+
+A string list of flags to pass to the Go linker tool via the `-ldflags` argument of `go build`. Possible values can be retrieved by running `go tool link --help`.
+The most common use case for this argument is to make the resulting executable aware of its own version by injecting the value of a string variable using the `-X` flag. For example:
+
+```nix
+{
+  ldflags = [
+    "-X main.Version=${version}"
+    "-X main.Commit=${version}"
+  ];
+}
+```
+
+### `tags` {#var-go-tags}
+
+A string list of [Go build tags (also called build constraints)](https://pkg.go.dev/cmd/go#hdr-Build_constraints) that are passed via the `-tags` argument of `go build`. These constraints control whether Go files from the source should be included in the build. For example:
+
+```nix
+{
+  tags = [
+    "production"
+    "sqlite"
+  ];
+}
+```
+
+Tags can also be set conditionally:
+
+```nix
+{
+  tags = [ "production" ] ++ lib.optionals withSqlite [ "sqlite" ];
+}
+```
+
+### `deleteVendor` {#var-go-deleteVendor}
+
+If set to `true`, removes the pre-existing vendor directory. This should only be used if the dependencies included in the vendor folder are broken or incomplete.
+
+### `subPackages` {#var-go-subPackages}
+
+Specified as a string or list of strings. Limits the builder to building only the listed child packages. If `subPackages` is not specified, all child packages are built.
+
+Many Go projects keep the main package in a `cmd` directory.
+The following example could be used to build only the `example-cli` and `example-server` binaries:
+
+```nix
+{
+  subPackages = [
+    "cmd/example-cli"
+    "cmd/example-server"
+  ];
+}
+```
+
+### `excludedPackages` {#var-go-excludedPackages}
+
+Specified as a string or list of strings. Causes the builder to skip building child packages that match any of the provided values.
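+
+For example (the package paths are hypothetical):
+
+```nix
+{
+  excludedPackages = [
+    "examples"
+    "cmd/broken-tool"
+  ];
+}
+```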
+
+### `CGO_ENABLED` {#var-go-CGO_ENABLED}
+
+When set to `0`, the [cgo](https://pkg.go.dev/cmd/cgo) command is disabled. As a consequence, the built
+program can't link against C libraries anymore, and the resulting binary is statically linked.
+
+When building with CGO enabled, Go will likely link some packages from the Go standard library against C libraries,
+even when the target code does not explicitly call into C dependencies. With `CGO_ENABLED = 0;`, Go
+will always use the native Go implementation of these internal packages. For reference, see the
+[net](https://pkg.go.dev/net#hdr-Name_Resolution) and [os/user](https://pkg.go.dev/os/user#pkg-overview) packages.
+Notice that whether these packages use the native Go implementation can also be controlled
+on a per-package level using build tags (`tags`). When CGO is disabled, these tags have no additional effect.
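+
+For example, cgo can be disabled like this:
+
+```nix
+{
+  CGO_ENABLED = 0;
+}
+```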
+
+When a Go program depends on C libraries, place those dependencies in `buildInputs`:
+
+```nix
+{
+  buildInputs = [
+    libvirt
+    libxml2
+  ];
+}
+```
+
+`CGO_ENABLED` defaults to `1`.
+
+### `enableParallelBuilding` {#var-go-enableParallelBuilding}
+
+Whether builds and tests should run in parallel.
+
+Defaults to `true`.
+
+### `allowGoReference` {#var-go-allowGoReference}
+
+Whether the build result should be allowed to contain references to the Go toolchain. This might be needed for programs that are coupled with the compiler, but it shouldn't be set without a good reason.
+
+Defaults to `false`.
+
+## Controlling the Go environment {#ssec-go-environment}
+
+The Go build can be further tweaked by setting environment variables. In most cases, this isn't needed. Possible values can be found in the [Go documentation of accepted environment variables](https://pkg.go.dev/cmd/go#hdr-Environment_variables). Notice that some of these flags are set by the builder itself and should not be set explicitly. If in doubt, grep the implementation of the builder.
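+
+As a sketch, a variable that the builder does not manage itself can be set as a plain derivation attribute. Here `GOAMD64`, which selects the amd64 microarchitecture level, is used purely as a hypothetical example:
+
+```nix
+{
+  # Hypothetical: require the v2 amd64 microarchitecture level.
+  GOAMD64 = "v2";
+}
+```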
+
+## Skipping tests {#ssec-skip-go-tests}
+
+`buildGoModule` runs tests by default. Failing tests can be disabled using the `checkFlags` parameter.
+This is done with the [`-skip` or `-run`](https://pkg.go.dev/cmd/go#hdr-Testing_flags) flags of the `go test` command.
+
+For example, only a selection of tests could be run with:
+
+```nix
+{
+  # -run and -skip accept regular expressions
+  checkFlags = [
+    "-run=^Test(Simple|Fast)$"
+  ];
+}
+```
+
+If a larger number of tests should be skipped, the following pattern can be used:
+
+```nix
+{
+  checkFlags =
+    let
+      # Skip tests that require network access
+      skippedTests = [
+        "TestNetwork"
+        "TestDatabase/with_mysql" # exclude only the subtest
+        "TestIntegration"
+      ];
+    in
+    [ "-skip=^${builtins.concatStringsSep "$|^" skippedTests}$" ];
+}
+```
+
+To disable tests altogether, set `doCheck = false;`.
+`buildGoPackage` does not execute tests by default.
diff --git a/nixpkgs/doc/languages-frameworks/haskell.section.md b/nixpkgs/doc/languages-frameworks/haskell.section.md
new file mode 100644
index 000000000000..5d7796b554de
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/haskell.section.md
@@ -0,0 +1,1311 @@
+# Haskell {#haskell}
+
+The Haskell infrastructure in Nixpkgs has two main purposes: The primary purpose
+is to provide a Haskell compiler and build tools as well as infrastructure for
+packaging Haskell-based packages.
+
+The secondary purpose is to provide support for Haskell development environments
+including prebuilt Haskell libraries. However, in this area sacrifices have been
+made due to self-imposed restrictions in Nixpkgs, to lessen the maintenance
+effort and to improve performance. (More details in the subsection
+[Limitations.](#haskell-limitations))
+
+## Available packages {#haskell-available-packages}
+
+The compiler and most build tools are exposed at the top level:
+
+* `ghc` is the default version of GHC
+* Language specific tools: `cabal-install`, `stack`, `hpack`, …
+
+Many “normal” user facing packages written in Haskell, like `niv` or `cachix`,
+are also exposed at the top level, and there is nothing Haskell specific to
+installing and using them.
+
+All of these packages are originally defined in the `haskellPackages` package
+set and are re-exposed with a reduced dependency closure for convenience.
+(see `justStaticExecutables` or `enableSeparateBinOutput` below)
+
+The `haskellPackages` set includes at least one version of every package from
+Hackage as well as some manually injected packages. This amounts to a lot of
+packages, so it is hidden from `nix-env -qa` by default for performance reasons.
+You can still list all packages in the set like this:
+
+```console
+$ nix-env -f '<nixpkgs>' -qaP -A haskellPackages
+haskellPackages.a50                                                         a50-0.5
+haskellPackages.AAI                                                         AAI-0.2.0.1
+haskellPackages.aasam                                                       aasam-0.2.0.0
+haskellPackages.abacate                                                     abacate-0.0.0.0
+haskellPackages.abc-puzzle                                                  abc-puzzle-0.2.1
+…
+```
+
+Also, the `haskellPackages` set is included on [search.nixos.org].
+
+The attribute names in `haskellPackages` always correspond with their name on
+Hackage. Since Hackage allows names that are not valid Nix without escaping,
+you need to take care when handling attribute names like `3dmodels`.
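+
+For instance, such packages have to be accessed using a quoted attribute name:
+
+```nix
+haskellPackages."3dmodels"
+```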
+
+For packages that are part of [Stackage] (a curated set of known to be
+compatible packages), we use the version prescribed by a Stackage snapshot
+(usually the current LTS one) as the default version. For all other packages we
+use the latest version from [Hackage](https://hackage.org) (the repository of
+basically all open source Haskell packages). See [below](#haskell-available-versions)
+for a few more details on this.
+
+Roughly half of the 16K packages contained in `haskellPackages` don’t actually
+build and are [marked as broken semi-automatically](https://github.com/NixOS/nixpkgs/blob/haskell-updates/pkgs/development/haskell-modules/configuration-hackage2nix/broken.yaml).
+Most of those packages are deprecated or unmaintained, but sometimes packages
+that should build do not build. Very often, fixing them is not a lot of work.
+
+<!--
+TODO(@sternenseemann):
+How you can help with that is
+described in [Fixing a broken package](#haskell-fixing-a-broken-package).
+-->
+
+`haskellPackages` is built with our default compiler, but we also provide other
+releases of GHC and package sets built with them. You can list all available
+compilers like this:
+
+```console
+$ nix-env -f '<nixpkgs>' -qaP -A haskell.compiler
+haskell.compiler.ghc810                  ghc-8.10.7
+haskell.compiler.ghc90                   ghc-9.0.2
+haskell.compiler.ghc925                  ghc-9.2.5
+haskell.compiler.ghc926                  ghc-9.2.6
+haskell.compiler.ghc927                  ghc-9.2.7
+haskell.compiler.ghc92                   ghc-9.2.8
+haskell.compiler.ghc945                  ghc-9.4.5
+haskell.compiler.ghc946                  ghc-9.4.6
+haskell.compiler.ghc947                  ghc-9.4.7
+haskell.compiler.ghc94                   ghc-9.4.8
+haskell.compiler.ghc963                  ghc-9.6.3
+haskell.compiler.ghc96                   ghc-9.6.4
+haskell.compiler.ghc98                   ghc-9.8.1
+haskell.compiler.ghcHEAD                 ghc-9.9.20231121
+haskell.compiler.ghc8107Binary           ghc-binary-8.10.7
+haskell.compiler.ghc865Binary            ghc-binary-8.6.5
+haskell.compiler.ghc924Binary            ghc-binary-9.2.4
+haskell.compiler.integer-simple.ghc8107  ghc-integer-simple-8.10.7
+haskell.compiler.integer-simple.ghc810   ghc-integer-simple-8.10.7
+haskell.compiler.native-bignum.ghc90     ghc-native-bignum-9.0.2
+haskell.compiler.native-bignum.ghc902    ghc-native-bignum-9.0.2
+haskell.compiler.native-bignum.ghc925    ghc-native-bignum-9.2.5
+haskell.compiler.native-bignum.ghc926    ghc-native-bignum-9.2.6
+haskell.compiler.native-bignum.ghc927    ghc-native-bignum-9.2.7
+haskell.compiler.native-bignum.ghc92     ghc-native-bignum-9.2.8
+haskell.compiler.native-bignum.ghc928    ghc-native-bignum-9.2.8
+haskell.compiler.native-bignum.ghc945    ghc-native-bignum-9.4.5
+haskell.compiler.native-bignum.ghc946    ghc-native-bignum-9.4.6
+haskell.compiler.native-bignum.ghc947    ghc-native-bignum-9.4.7
+haskell.compiler.native-bignum.ghc94     ghc-native-bignum-9.4.8
+haskell.compiler.native-bignum.ghc948    ghc-native-bignum-9.4.8
+haskell.compiler.native-bignum.ghc963    ghc-native-bignum-9.6.3
+haskell.compiler.native-bignum.ghc96     ghc-native-bignum-9.6.4
+haskell.compiler.native-bignum.ghc964    ghc-native-bignum-9.6.4
+haskell.compiler.native-bignum.ghc98     ghc-native-bignum-9.8.1
+haskell.compiler.native-bignum.ghc981    ghc-native-bignum-9.8.1
+haskell.compiler.native-bignum.ghcHEAD   ghc-native-bignum-9.9.20231121
+haskell.compiler.ghcjs                   ghcjs-8.10.7
+```
+
+Each of those compiler versions has a corresponding attribute set built using
+it. However, the non-standard package sets are not tested regularly and, as a
+result, contain fewer working packages. The corresponding package set for GHC
+9.4.5 is `haskell.packages.ghc945`. In fact `haskellPackages` is just an alias
+for `haskell.packages.ghc964`:
+
+```console
+$ nix-env -f '<nixpkgs>' -qaP -A haskell.packages.ghc927
+haskell.packages.ghc927.a50                                                         a50-0.5
+haskell.packages.ghc927.AAI                                                         AAI-0.2.0.1
+haskell.packages.ghc927.aasam                                                       aasam-0.2.0.0
+haskell.packages.ghc927.abacate                                                     abacate-0.0.0.0
+haskell.packages.ghc927.abc-puzzle                                                  abc-puzzle-0.2.1
+…
+```
+
+Every package set also re-exposes the GHC used to build its packages as `haskell.packages.*.ghc`.
+
+### Available package versions {#haskell-available-versions}
+
+We aim for a “blessed” package set which only contains one version of each
+package, like [Stackage], which is a curated set of known to be compatible
+packages. We use the version information from Stackage snapshots and extend it
+with more packages. Normally, the number of Haskell packages that build in Nixpkgs
+is roughly two to three times the size of Stackage. To choose the version to
+use for a certain package, we apply the following rules:
+
+1. By default, `haskellPackages.foo` is the newest version of the package
+`foo` found on [Hackage](https://hackage.org), which is the central registry
+of all open source Haskell packages. Nixpkgs contains a reference to a pinned
+Hackage snapshot, thus we use the state of Hackage as of the last time we
+updated this pin.
+2. If the [Stackage] snapshot that we use (usually the newest LTS snapshot)
+contains a package, [we instead use the version from the Stackage snapshot as the
+default version for that package.](https://github.com/NixOS/nixpkgs/blob/haskell-updates/pkgs/development/haskell-modules/configuration-hackage2nix/stackage.yaml)
+3. For some packages that are not on Stackage, we have, if necessary, [manual
+overrides to set the default version to a version older than the newest on
+Hackage.](https://github.com/NixOS/nixpkgs/blob/haskell-updates/pkgs/development/haskell-modules/configuration-hackage2nix/main.yaml)
+4. For all packages for which the newest Hackage version is not the default
+version, there will also be a `haskellPackages.foo_x_y_z` package with the
+newest version. The `x_y_z` part encodes the version with dots replaced by
+underscores. When a new release on Hackage changes the newest version, the
+old package disappears under that name and is replaced by a newer one under
+the name with the new version. The versioned package name will also
+disappear when the default version, e.g. from Stackage, catches up with the
+newest version from Hackage. E.g., if `haskellPackages.foo` gets updated from
+1.0.0 to 1.1.0, the package `haskellPackages.foo_1_1_0` becomes obsolete and
+gets dropped.
+5. For some packages, we also [manually add other `haskellPackages.foo_x_y_z`
+versions](https://github.com/NixOS/nixpkgs/blob/haskell-updates/pkgs/development/haskell-modules/configuration-hackage2nix/main.yaml),
+if they are required for a certain build.
+
+Relying on `haskellPackages.foo_x_y_z` attributes in derivations outside
+nixpkgs is discouraged because they may change or disappear with every package
+set update.
+<!-- TODO(@maralorn) We should add a link to callHackage, etc. once we added
+them to the docs. -->
+
+All `haskell.packages.*` package sets use the same package descriptions and the same sets
+of versions by default. There are, however, GHC version specific override `.nix`
+files to loosen this a bit.
+
+### Dependency resolution {#haskell-dependency-resolution}
+
+Normally, when you build Haskell packages with `cabal-install`, `cabal-install`
+does dependency resolution. It looks at all Haskell package versions known
+on Hackage and tries to pick exactly one version for every (transitive) dependency
+of your build. Those versions need to satisfy all the version constraints
+given in the `.cabal` file of your package and all its dependencies.
+
+The [Haskell builder in nixpkgs](#haskell-mkderivation) does no such thing.
+It takes as input packages whose names match the desired dependencies
+and just checks whether they fulfill the version bounds, failing if they don’t
+(by default; see `jailbreak` to circumvent this).
+
+The `haskellPackages.callPackage` function does the package resolution.
+It will, e.g., use `haskellPackages.aeson`, which has the default version as
+described above, for a package input named `aeson`. (More generally,
+`<packages>.callPackage f` will call `f` with named inputs provided from the
+package set `<packages>`.)
+While this is the default behavior, it is possible to override the dependencies
+for a specific package, see
+[`override` and `overrideScope`](#haskell-overriding-haskell-packages).
+
+### Limitations {#haskell-limitations}
+
+Our main objective with `haskellPackages` is to package Haskell software in
+nixpkgs. This entails some limitations, partially due to self-imposed
+restrictions of nixpkgs, partially in the name of maintainability:
+
+* Only the packages built with the default compiler see extensive testing of the
+  whole package set. For other GHC versions only a few essential packages are
+  tested and cached.
+* As described above we only build one version of most packages.
+
+The experience of using an older or newer packaged compiler or different package
+versions may be worse, because builds will not be cached on `cache.nixos.org`
+or may fail.
+
+Thus, to get the best experience, make sure that your project can be compiled
+using the default compiler of nixpkgs and recent versions of its dependencies.
+
+A result of this setup is that getting a valid build plan for a given
+package can sometimes be quite painful, and in fact this is where most of the
+maintenance work for `haskellPackages` is required. Besides that, it is not
+possible to get the dependencies of a legacy project from nixpkgs or to use a
+specific stack solver for compiling a project.
+
+Even though we couldn’t use them directly in nixpkgs, it would be desirable
+to have tooling to generate working Nix package sets from build plans generated
+by `cabal-install` or a specific Stackage snapshot via import-from-derivation.
+Sadly we currently don’t have tooling for this. For this you might be
+interested in the alternative [haskell.nix] framework, which, be warned, is
+completely incompatible with packages from `haskellPackages`.
+
+<!-- TODO(@maralorn) Link to package set generation docs in the contributors guide below. -->
+
+## `haskellPackages.mkDerivation` {#haskell-mkderivation}
+
+Every haskell package set has its own haskell-aware `mkDerivation` which is used
+to build its packages. Generally you won't have to interact with this builder
+since [cabal2nix][cabal2nix] can generate packages
+using it for an arbitrary cabal package definition. Still it is useful to know
+the parameters it takes when you need to
+[override](#haskell-overriding-haskell-packages) a generated Nix expression.
+
+`haskellPackages.mkDerivation` is a wrapper around `stdenv.mkDerivation` which
+re-defines the default phases to be haskell aware and handles dependency
+specification, test suites, benchmarks etc. by compiling and invoking the
+package's `Setup.hs`. It does *not* use or invoke the `cabal-install` binary,
+but uses the underlying `Cabal` library instead.
+
+### General arguments {#haskell-derivation-args}
+
+`pname`
+: Package name, assumed to be the same as on Hackage (if applicable)
+
+`version`
+: Packaged version, assumed to be the same as on Hackage (if applicable)
+
+`src`
+: Source of the package. If omitted, fetch package corresponding to `pname`
+and `version` from Hackage.
+
+`sha256`
+: Hash to use for the default case of `src`.
+
+`revision`
+: Revision number of the updated cabal file to fetch from Hackage.
+If `null` (which is the default value), the one included in `src` is used.
+
+`editedCabalFile`
+: `sha256` hash of the cabal file identified by `revision` or `null`.
+
+`configureFlags`
+: Extra flags passed when executing the `configure` command of `Setup.hs`.
+
+`buildFlags`
+: Extra flags passed when executing the `build` command of `Setup.hs`.
+
+`haddockFlags`
+: Extra flags passed to `Setup.hs haddock` when building the documentation.
+
+`doCheck`
+: Whether to execute the package's test suite if it has one. Defaults to `true` unless cross-compiling.
+
+`doBenchmark`
+: Whether to execute the package's benchmark if it has one. Defaults to `false`.
+
+`doHoogle`
+: Whether to generate an index file for [hoogle][hoogle] as part of
+`haddockPhase` by passing the [`--hoogle` option][haddock-hoogle-option].
+Defaults to `true`.
+
+`doHaddockQuickjump`
+: Whether to generate an index for interactive navigation of the HTML documentation.
+Defaults to `true` if supported.
+
+`doInstallIntermediates`
+: Whether to install intermediate build products (files written to `dist/build`
+by GHC during the build process). With `enableSeparateIntermediatesOutput`,
+these files are instead installed to [a separate `intermediates`
+output.][multiple-outputs] The output can then be passed into a future build of
+the same package with the `previousIntermediates` argument to support
+incremental builds. See [“Incremental builds”](#haskell-incremental-builds) for
+more information. Defaults to `false`.
+
+`enableLibraryProfiling`
+: Whether to enable [profiling][profiling] for libraries contained in the
+package. Enabled by default if supported.
+
+`enableExecutableProfiling`
+: Whether to enable [profiling][profiling] for executables contained in the
+package. Disabled by default.
+
+`profilingDetail`
+: [Profiling detail level][profiling-detail] to set. Defaults to `exported-functions`.
+
+`enableSharedExecutables`
+: Whether to link executables dynamically. By default, executables are linked statically.
+
+`enableSharedLibraries`
+: Whether to build shared Haskell libraries. This is enabled by default unless we are using
+`pkgsStatic` or shared libraries have been disabled in GHC.
+
+`enableStaticLibraries`
+: Whether to build static libraries. Enabled by default if supported.
+
+`enableDeadCodeElimination`
+: Whether to enable linker based dead code elimination in GHC.
+Enabled by default if supported.
+
+`enableHsc2hsViaAsm`
+: Whether to pass `--via-asm` to `hsc2hs`. Enabled by default only on Windows.
+
+`hyperlinkSource`
+: Whether to render the source as well as part of the haddock documentation
+by passing the [`--hyperlinked-source` flag][haddock-hyperlinked-source-option].
+Defaults to `true`.
+
+`isExecutable`
+: Whether the package contains an executable.
+
+`isLibrary`
+: Whether the package contains a library.
+
+`jailbreak`
+: Whether to execute [jailbreak-cabal][jailbreak-cabal] before `configurePhase`
+to lift any version constraints in the cabal file. Note that this can't
+lift version bounds if they are conditional, i.e. if a dependency is hidden
+behind a flag.
+
+`enableParallelBuilding`
+: Whether to use the `-j` flag to make GHC/Cabal start multiple jobs in parallel.
+
+`maxBuildCores`
+: Upper limit of jobs to use in parallel for compilation regardless of
+`$NIX_BUILD_CORES`. Defaults to 16 as Haskell compilation with GHC currently
+sees a [performance regression](https://gitlab.haskell.org/ghc/ghc/-/issues/9221)
+if too many parallel jobs are used.
+
+`doCoverage`
+: Whether to generate and install files needed for [HPC][haskell-program-coverage].
+Defaults to `false`.
+
+`doHaddock`
+: Whether to build (HTML) documentation using [haddock][haddock].
+Defaults to `true` if supported.
+
+`testTarget`
+: Name of the test suite to build and run. If unset, all test suites will be executed.
+
+`preCompileBuildDriver`
+: Shell code to run before compiling `Setup.hs`.
+
+`postCompileBuildDriver`
+: Shell code to run after compiling `Setup.hs`.
+
+`preHaddock`
+: Shell code to run before building documentation using haddock.
+
+`postHaddock`
+: Shell code to run after building documentation using haddock.
+
+`coreSetup`
+: Whether to only allow core libraries to be used while building `Setup.hs`.
+Defaults to `false`.
+
+`useCpphs`
+: Whether to enable the [cpphs][cpphs] preprocessor. Defaults to `false`.
+
+`enableSeparateBinOutput`
+: Whether to install executables to a separate `bin` output. Defaults to `false`.
+
+`enableSeparateDataOutput`
+: Whether to install data files shipped with the package to a separate `data` output.
+Defaults to `false`.
+
+`enableSeparateDocOutput`
+: Whether to install documentation to a separate `doc` output.
+Is automatically enabled if `doHaddock` is `true`.
+
+`enableSeparateIntermediatesOutput`
+: When `doInstallIntermediates` is true, whether to install intermediate build
+products to a separate `intermediates` output. See [“Incremental
+builds”](#haskell-incremental-builds) for more information. Defaults to
+`false`.
+
+`allowInconsistentDependencies`
+: If enabled, allow multiple versions of the same Haskell package in the
+dependency tree at configure time. Often in such a situation compilation would
+later fail because of type mismatches. Defaults to `false`.
+
+`enableLibraryForGhci`
+: Build and install a special object file for GHCi. This improves performance
+when loading the library in the REPL, but requires extra build time and
+disk space. Defaults to `false`.
+
+`previousIntermediates`
+: If non-null, intermediate build artifacts are copied from this input to
+`dist/build` before compiling. See [“Incremental
+builds”](#haskell-incremental-builds) for more information. Defaults to `null`.
+
+`buildTarget`
+: Name of the executable or library to build and install.
+If unset, all available targets are built and installed.
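+
+As a rough sketch (assuming `haskellPackages` and `lib` are in scope), a minimal hand-written package that relies on the default Hackage source fetching could look like this; the name and hash are placeholders:
+
+```nix
+haskellPackages.mkDerivation {
+  pname = "my-library";
+  version = "0.1.0.0";
+  # `src` is omitted, so the my-library-0.1.0.0 tarball is fetched from Hackage
+  # and verified against this hash.
+  sha256 = lib.fakeSha256; # replace with the real tarball hash
+  isLibrary = true;
+  license = lib.licenses.bsd3;
+}
+```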
+
+### Specifying dependencies {#haskell-derivation-deps}
+
+Since `haskellPackages.mkDerivation` is intended to be generated from cabal
+files, it reflects cabal's way of specifying dependencies. For one, dependencies
+are grouped by what part of the package they belong to. This helps to reduce the
+dependency closure of a derivation, for example benchmark dependencies are not
+included if `doBenchmark == false`.
+
+`setup*Depends`
+: dependencies necessary to compile `Setup.hs`
+
+`library*Depends`
+: dependencies of a library contained in the package
+
+`executable*Depends`
+: dependencies of an executable contained in the package
+
+`test*Depends`
+: dependencies of a test suite contained in the package
+
+`benchmark*Depends`
+: dependencies of a benchmark contained in the package
+
+The other categorization relates to the way the package depends on the dependency:
+
+`*ToolDepends`
+: Tools we need to run as part of the build process.
+They are added to the derivation's `nativeBuildInputs`.
+
+`*HaskellDepends`
+: Haskell libraries the package depends on.
+They are added to `propagatedBuildInputs`.
+
+`*SystemDepends`
+: Non-Haskell libraries the package depends on.
+They are added to `buildInputs`.
+
+`*PkgconfigDepends`
+: `*SystemDepends` which are discovered using `pkg-config`.
+They are added to `buildInputs` and it is additionally
+ensured that `pkg-config` is available at build time.
+
+`*FrameworkDepends`
+: Apple SDK frameworks which the package depends on when compiling it on Darwin.
+
+Using these two distinctions, you should be able to categorize most of the dependency
+specifications that are available:
+`benchmarkFrameworkDepends`,
+`benchmarkHaskellDepends`,
+`benchmarkPkgconfigDepends`,
+`benchmarkSystemDepends`,
+`benchmarkToolDepends`,
+`executableFrameworkDepends`,
+`executableHaskellDepends`,
+`executablePkgconfigDepends`,
+`executableSystemDepends`,
+`executableToolDepends`,
+`libraryFrameworkDepends`,
+`libraryHaskellDepends`,
+`libraryPkgconfigDepends`,
+`librarySystemDepends`,
+`libraryToolDepends`,
+`setupHaskellDepends`,
+`testFrameworkDepends`,
+`testHaskellDepends`,
+`testPkgconfigDepends`,
+`testSystemDepends` and
+`testToolDepends`.
+
+That only leaves the following extra ways for specifying dependencies:
+
+`buildDepends`
+: Allows specifying Haskell dependencies which are added to `propagatedBuildInputs` unconditionally.
+
+`buildTools`
+: Like `*ToolDepends`, but are added to `nativeBuildInputs` unconditionally.
+
+`extraLibraries`
+: Like `*SystemDepends`, but are added to `buildInputs` unconditionally.
+
+`pkg-configDepends`
+: Like `*PkgconfigDepends`, but are added to `buildInputs` unconditionally.
+
+`testDepends`
+: Deprecated, use either `testHaskellDepends` or `testSystemDepends`.
+
+`benchmarkDepends`
+: Deprecated, use either `benchmarkHaskellDepends` or `benchmarkSystemDepends`.
+
+The dependency specification methods in this list which are unconditional
+are especially useful when writing [overrides](#haskell-overriding-haskell-packages)
+when you want to make sure that they are definitely included. However, it is
+recommended to use the more accurate ones listed above when possible.
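+
+Put together, a (hypothetical) generated expression using these dependency groups might look roughly like this, assuming it is passed to `haskellPackages.callPackage` so that the named inputs are supplied from the package set:
+
+```nix
+{ mkDerivation, aeson, base, hspec, hspec-discover, lib, optparse-applicative, zlib }:
+mkDerivation {
+  pname = "my-tool"; # hypothetical package
+  version = "0.1.0.0";
+  src = ./.;
+  isLibrary = true;
+  isExecutable = true;
+  libraryHaskellDepends = [ aeson base ];
+  librarySystemDepends = [ zlib ];
+  executableHaskellDepends = [ base optparse-applicative ];
+  testHaskellDepends = [ base hspec ];
+  testToolDepends = [ hspec-discover ];
+  license = lib.licenses.mit;
+}
+```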
+
+### Meta attributes {#haskell-derivation-meta}
+
+`haskellPackages.mkDerivation` accepts the following attributes as direct
+arguments which are transparently set in `meta` of the resulting derivation. See
+the [Meta-attributes section](#chap-meta) for their documentation.
+
+* These attributes are populated with a default value if omitted:
+    * `homepage`: defaults to the Hackage page for `pname`.
+    * `platforms`: defaults to `lib.platforms.all` (since GHC can cross-compile)
+* These attributes are only set if given:
+    * `description`
+    * `license`
+    * `changelog`
+    * `maintainers`
+    * `broken`
+    * `hydraPlatforms`
+
+### Incremental builds {#haskell-incremental-builds}
+
+`haskellPackages.mkDerivation` supports incremental builds for GHC 9.4 and
+newer with the `doInstallIntermediates`, `enableSeparateIntermediatesOutput`,
+and `previousIntermediates` arguments.
+
+The basic idea is to first perform a full build of the package in question,
+save its intermediate build products for later, and then copy those build
+products into the build directory of an incremental build performed later.
+Then, GHC will use those build artifacts to avoid recompiling unchanged
+modules.
+
+For more detail on how to store and use incremental build products, as well as
+the motivation behind this feature, see [Gabriella Gonzalez’ blog post “Nixpkgs
+support for incremental Haskell builds”.][incremental-builds]
+
+An incremental build for [the `turtle` package][turtle] can be performed like
+so:
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+  inherit (pkgs) haskell;
+  inherit (haskell.lib.compose) overrideCabal;
+
+  # Incremental builds work with GHC >=9.4.
+  turtle = haskell.packages.ghc944.turtle;
+
+  # This will do a full build of `turtle`, while writing the intermediate build products
+  # (compiled modules, etc.) to the `intermediates` output.
+  turtle-full-build-with-incremental-output = overrideCabal (drv: {
+    doInstallIntermediates = true;
+    enableSeparateIntermediatesOutput = true;
+  }) turtle;
+
+  # This will do an incremental build of `turtle` by copying the previously
+  # compiled modules and intermediate build products into the source tree
+  # before running the build.
+  #
+  # GHC will then naturally pick up and reuse these products, making this build
+  # complete much more quickly than the previous one.
+  turtle-incremental-build = overrideCabal (drv: {
+    previousIntermediates = turtle-full-build-with-incremental-output.intermediates;
+  }) turtle;
+in
+  turtle-incremental-build
+```
+
+## Development environments {#haskell-development-environments}
+
+In addition to building and installing Haskell software, nixpkgs can also
+provide development environments for Haskell projects. This has the obvious
+advantage that you benefit from `cache.nixos.org` and no longer need to compile
+all project dependencies yourself. While it is often very useful, this is not
+the primary use case of our package set. Have a look at the section
+[available package versions](#haskell-available-versions) to learn which
+versions of packages we provide and the section
+[limitations](#haskell-limitations), to judge whether a `haskellPackages`
+based development environment for your project is feasible.
+
+By default, every derivation built using
+[`haskellPackages.mkDerivation`](#haskell-mkderivation) exposes an environment
+suitable for building it interactively as the `env` attribute. For example, if
+you have a local checkout of `random`, you can enter a development environment
+for it like this (if the dependencies in the development and packaged version
+match):
+
+```console
+$ cd ~/src/random
+$ nix-shell -A haskellPackages.random.env '<nixpkgs>'
+[nix-shell:~/src/random]$ ghc-pkg list
+/nix/store/a8hhl54xlzfizrhcf03c1l3f6l9l8qwv-ghc-9.2.4-with-packages/lib/ghc-9.2.4/package.conf.d
+    Cabal-3.6.3.0
+    array-0.5.4.0
+    base-4.16.3.0
+    binary-0.8.9.0
+    …
+    ghc-9.2.4
+    …
+```
+
+As you can see, the environment contains a GHC which is set up so it finds all
+dependencies of `random`. Note that this environment does not mirror
+the environment used to build the package, but is intended as a convenient
+tool for development and simple debugging. `env` relies on the `ghcWithPackages`
+wrapper which automatically injects a pre-populated package-db into every
+GHC invocation. In contrast, using `nix-shell -A haskellPackages.random` will
+not result in an environment in which the dependencies are in GHC's package
+database. Instead, the Haskell builder will pass in all dependencies explicitly
+via configure flags.
+
+`env` mirrors the normal derivation environment in one aspect: It does not include
+familiar development tools like `cabal-install`, since we rely on plain `Setup.hs`
+to build all packages. However, `cabal-install` will work as expected if in
+`PATH` (e.g. when installed globally and using a `nix-shell` without `--pure`).
+A declarative and pure way of adding arbitrary development tools is provided
+via [`shellFor`](#haskell-shellFor).
+
+When using `cabal-install` for dependency resolution, you need to be a bit
+careful to achieve build purity. `cabal-install` will find and use all
+dependencies installed from the package's `env` via Nix, but it will also
+consult Hackage to potentially download and compile dependencies if it can’t
+find a valid build plan locally. To prevent this, you can either never run
+`cabal update`, remove the cabal database from your `~/.cabal` folder, or run
+`cabal` with `--offline`. Note, though, that for some use cases `cabal2nix` needs
+the local Hackage db.
+
+Often you won't work on a package that is already part of `haskellPackages` or
+Hackage, so we first need to write a Nix expression from which to obtain the
+development environment. Luckily, we can generate one very easily from an already
+existing cabal file using `cabal2nix`:
+
+```console
+$ ls
+my-project.cabal src …
+$ cabal2nix ./. > my-project.nix
+```
+
+The generated Nix expression evaluates to a function ready to be
+`callPackage`-ed. For now, we can add a minimal `default.nix` which does just
+that:
+
+```nix
+# Retrieve nixpkgs impurely from NIX_PATH for now, you can pin it instead, of course.
+{ pkgs ? import <nixpkgs> {} }:
+
+# use the nixpkgs default haskell package set
+pkgs.haskellPackages.callPackage ./my-project.nix { }
+```
+
+Using `nix-build default.nix` we can now build our project, but we can also
+enter a shell with all the package's dependencies available using `nix-shell
+-A env default.nix`. If you have `cabal-install` installed globally, it'll work
+inside the shell as expected.
+
+### shellFor {#haskell-shellFor}
+
+Having to install tools globally is obviously not great, especially if you want
+to provide a batteries-included `shell.nix` with your project. Luckily there's a
+proper tool for making development environments out of packages' build
+environments: `shellFor`, a function exposed by every haskell package set. It
+takes the following arguments and returns a derivation which is suitable as a
+development environment inside `nix-shell`:
+
+`packages`
+: This argument is used to select the packages for which to build the
+development environment. This should be a function which takes a haskell package
+set and returns a list of packages. `shellFor` will pass the used package set to
+this function and include all dependencies of the returned packages in the build
+environment. This means you can reuse Nix expressions of packages included in
+nixpkgs, but also use local Nix expressions like this: `hpkgs: [
+(hpkgs.callPackage ./my-project.nix { }) ]`.
+
+`nativeBuildInputs`
+: Expects a list of derivations to add as build tools to the build environment.
+This is the place to add packages like `cabal-install`, `doctest` or `hlint`.
+Defaults to `[]`.
+
+`buildInputs`
+: Expects a list of derivations to add as library dependencies, like `openssl`.
+This is rarely necessary as the haskell package expressions usually track system
+dependencies as well. Defaults to `[]`. (see also
+[derivation dependencies](#haskell-derivation-deps))
+
+`withHoogle`
+: If this is true, `hoogle` will be added to `nativeBuildInputs`.
+Additionally, its database will be populated with all included dependencies,
+so you'll be able to search through the documentation of your dependencies.
+Defaults to `false`.
+
+`genericBuilderArgsModifier`
+: This argument accepts a function allowing you to modify the arguments passed
+to `mkDerivation` in order to create the development environment. For example,
+`args: { doCheck = false; }` would cause the environment to not include any test
+dependencies. Defaults to `lib.id`.
+
+`doBenchmark`
+: This is a shortcut for enabling `doBenchmark` via `genericBuilderArgsModifier`.
+Setting it to `true` will cause the development environment to include all
+benchmark dependencies which would be excluded by default. Defaults to `false`.
+
+One neat property of `shellFor` is that it allows you to work on multiple
+packages using the same environment in conjunction with
+[cabal.project files][cabal-project-files].
+Say our example above depends on `distribution-nixpkgs` and we have a project
+file set up for both; then we can add the following `shell.nix` expression:
+
+```nix
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.haskellPackages.shellFor {
+  packages = hpkgs: [
+    # reuse the nixpkgs for this package
+    hpkgs.distribution-nixpkgs
+    # call our generated Nix expression manually
+    (hpkgs.callPackage ./my-project/my-project.nix { })
+  ];
+
+  # development tools we use
+  nativeBuildInputs = [
+    pkgs.cabal-install
+    pkgs.haskellPackages.doctest
+    pkgs.cabal2nix
+  ];
+
+  # Extra arguments are added to mkDerivation's arguments as-is.
+  # Since it adds all passed arguments to the shell environment,
+  # we can use this to set the environment variable the `Paths_`
+  # module of distribution-nixpkgs uses to search for bundled
+  # files.
+  # See also: https://cabal.readthedocs.io/en/latest/cabal-package.html#accessing-data-files-from-package-code
+  distribution_nixpkgs_datadir = toString ./distribution-nixpkgs;
+}
+```
+
+<!-- TODO(@sternenseemann): deps are not included if not selected -->
+
+### haskell-language-server {#haskell-language-server}
+
+To use HLS in short: Install `pkgs.haskell-language-server` e.g. in
+`nativeBuildInputs` in `shellFor` and use the `haskell-language-server-wrapper`
+command to run it. See the [HLS user guide] on how to configure your text
+editor to use HLS and how to test your setup.
+
+HLS needs to be compiled with the GHC version of the project you use it
+on.
+
+`pkgs.haskell-language-server` provides
+`haskell-language-server-wrapper`, `haskell-language-server`
+and `haskell-language-server-x.x.x`
+binaries, where `x.x.x` is the GHC version for which it is compiled. By
+default, it only includes binaries for the current GHC version, to reduce
+closure size. The closure size is large, because HLS needs to be dynamically
+linked to work reliably. You can override the list of supported GHC versions
+with e.g.
+
+```nix
+pkgs.haskell-language-server.override { supportedGhcVersions = [ "90" "94" ]; }
+```
+
+Here, any string `version` is allowed for which
+`haskell.packages.ghc${version}` is an existing package set.
+
+When you run `haskell-language-server-wrapper` it will detect the GHC
+version used by the project you are working on (by asking e.g. cabal or
+stack) and pick the appropriate versioned binary from your path.
+
+Be careful when installing HLS globally and using a pinned nixpkgs for a
+Haskell project in a `nix-shell`. If the nixpkgs versions deviate too much
+(e.g., use different `glibc` versions) the `haskell-language-server-?.?.?`
+executable will try to detect these situations and refuse to start. It is
+recommended to obtain HLS via `nix-shell` from the nixpkgs version pinned in
+there instead.
+
+The top level `pkgs.haskell-language-server` attribute is just a convenience
+wrapper to make it possible to install HLS for multiple GHC versions at the
+same time. If you know that you only use one GHC version, e.g., in a project
+specific `nix-shell` you can use
+`pkgs.haskellPackages.haskell-language-server` or
+`pkgs.haskell.packages.*.haskell-language-server` from the package set you use.
+
+If you use `nix-shell` for your development environments remember to start your
+editor in that environment. You may want to use something like `direnv` and/or an
+editor plugin to achieve this.
+
+## Overriding Haskell packages {#haskell-overriding-haskell-packages}
+
+### Overriding a single package {#haskell-overriding-a-single-package}
+
+<!-- TODO(@sternenseemann): we should document /somewhere/ that base == null etc. -->
+
+Like many language specific subsystems in nixpkgs, the Haskell infrastructure
+also has its own quirks when it comes to overriding. Overriding of the *inputs*
+to a package at least follows the standard procedure. For example, imagine you
+need to build `nix-tree` with a more recent version of `brick` than the default
+one provided by `haskellPackages`:
+
+```nix
+haskellPackages.nix-tree.override {
+  brick = haskellPackages.brick_0_67;
+}
+```
+
+<!-- TODO(@sternenseemann): This belongs in the next section
+One common problem you may run into with such an override is the build failing
+with “abort because of serious configure-time warning from Cabal”. When scrolling
+up, you'll usually notice that Cabal noticed that more than one versions of the same
+package was present in the dependency graph. This typically causes a later compilation
+failure (the error message `haskellPackages.mkDerivation` produces tries to save
+you the time of finding this out yourself, but if you wish to do so, you can
+disable it using `allowInconsistentDependencies`). Luckily, `haskellPackages` provides
+you with a tool to deal with this. `overrideScope` creates a new `haskellPackages`
+instance with the override applied *globally* for this package, so the dependency
+closure automatically uses a consistent version of the overridden package. E. g.
+if `haskell-ci` needs a recent version of `Cabal`, but also uses other packages
+that depend on that library, you may want to use:
+
+```nix
+haskellPackages.haskell-ci.overrideScope (self: super: {
+  Cabal = self.Cabal_3_6_2_0;
+})
+```
+
+-->
+
+The custom interface comes into play when you want to override the arguments
+passed to `haskellPackages.mkDerivation`. For this, the function `overrideCabal`
+from `haskell.lib.compose` is used. E.g., if you want to install a man page
+that is distributed with the package, you can do something like this:
+
+```nix
+haskell.lib.compose.overrideCabal (drv: {
+  postInstall = ''
+    ${drv.postInstall or ""}
+    install -Dm644 man/pnbackup.1 -t $out/share/man/man1
+  '';
+}) haskellPackages.pnbackup
+```
+
+`overrideCabal` takes two arguments:
+
+1. A function which receives all arguments passed to `haskellPackages.mkDerivation`
+   before and returns a set of arguments to replace (or add) with a new value.
+2. The Haskell derivation to override.
+
+The arguments are ordered so that you can easily create helper functions by making
+use of currying:
+
+```nix
+let
+  installManPage = haskell.lib.compose.overrideCabal (drv: {
+    postInstall = ''
+      ${drv.postInstall or ""}
+      install -Dm644 man/${drv.pname}.1 -t "$out/share/man/man1"
+    '';
+  });
+in
+
+installManPage haskellPackages.pnbackup
+```
+
+In fact, `haskell.lib.compose` already provides lots of useful helpers for common
+tasks, detailed in the next section. They are also structured in such a way that
+they can be combined using `lib.pipe`:
+
+```nix
+lib.pipe my-haskell-package [
+  # lift version bounds on dependencies
+  haskell.lib.compose.doJailbreak
+  # disable building the haddock documentation
+  haskell.lib.compose.dontHaddock
+  # pass extra package flag to Cabal's configure step
+  (haskell.lib.compose.enableCabalFlag "myflag")
+]
+```
+
+#### `haskell.lib.compose` {#haskell-haskell.lib.compose}
+
+The base interface for all overriding is the following function:
+
+`overrideCabal f drv`
+: Passes the arguments that were used to obtain `drv` to `f` and uses the resulting
+attribute set to update the argument set. Then a recomputed version of `drv`
+using the new argument set is returned.
+
+<!--
+TODO(@sternenseemann): ideally we want to be more detailed here as well, but
+I want to avoid the documentation having to be kept in sync in too many places.
+We already document this stuff in the mkDerivation section and lib/compose.nix.
+Ideally this section would be generated from the latter in the future.
+-->
+
+All other helper functions are implemented in terms of `overrideCabal` and make
+common overrides shorter and more complicated ones trivial. The simple overrides
+which only change a single argument are only described very briefly in the
+following overview. Refer to the
+[documentation of `haskellPackages.mkDerivation`](#haskell-mkderivation)
+for a more detailed description of the effects of the respective arguments.
+
+##### Packaging Helpers {#haskell-packaging-helpers}
+
+`overrideSrc { src, version } drv`
+: Replace the source used for building `drv` with the path or derivation given
+as `src`. The `version` attribute is optional. Prefer this function over
+overriding `src` via `overrideCabal`, since it also automatically takes care of
+removing any Hackage revisions.
+
+<!-- TODO(@sternenseemann): deprecated
+
+`generateOptparseApplicativeCompletions list drv`
+: Generate and install shell completion files for the installed executables whose
+names are given via `list`. The executables need to be using `optparse-applicative`
+for this to work.
+-->
+
+`justStaticExecutables drv`
+: Only build and install the executables produced by `drv`, removing everything
+that may refer to other Haskell packages' store paths (like libraries and
+documentation). This dramatically reduces the closure size of the resulting
+derivation. Note that the executables are only statically linked against their
+Haskell dependencies, but will still link dynamically against libc, GMP and
+other system library dependencies. If dependencies use their Cabal-generated
+`Paths_*` module, this may not work as well if GHC's dead code elimination
+is unable to remove the references to the dependency's store path that module
+contains.
+
+`enableSeparateBinOutput drv`
+: Install executables produced by `drv` to a separate `bin` output. This
+has a similar effect as `justStaticExecutables`, but preserves the libraries
+and documentation in the `out` output alongside the `bin` output with a
+much smaller closure size.
+
+`markBroken drv`
+: Sets the `broken` flag to `true` for `drv`.
+
+`markUnbroken drv`, `unmarkBroken drv`
+: Set the `broken` flag to `false` for `drv`.
+
+`doDistribute drv`
+: Updates `hydraPlatforms` so that Hydra will build `drv`. This is
+sometimes necessary when working with versioned packages in
+`haskellPackages` which are not built by default.
+
+`dontDistribute drv`
+: Sets `hydraPlatforms` to `[]`, causing Hydra to skip this package
+altogether. Useful if it fails to evaluate cleanly and is causing
+noise in the evaluation errors tab on Hydra.
+
+##### Development Helpers {#haskell-development-helpers}
+
+`sdistTarball drv`
+: Create a source distribution tarball like those found on Hackage
+instead of building the package `drv`.
+
+`documentationTarball drv`
+: Create a documentation tarball suitable for uploading to Hackage
+instead of building the package `drv`.
+
+`buildFromSdist drv`
+: Uses `sdistTarball drv` as the source to compile `drv`. This helps to catch
+packaging bugs when building from a local directory, e.g. when required files
+are missing from `extra-source-files`.
+
+`failOnAllWarnings drv`
+: Enables all warnings GHC supports and makes it fail the build if any of them
+are emitted.
+
+<!-- TODO(@sternenseemann):
+`checkUnusedPackages opts drv`
+: Adds an extra check to `postBuild` which fails the build if any dependency
+taken as an input is not used. The `opts` attribute set allows relaxing this
+check.
+-->
+
+`enableDWARFDebugging drv`
+: Compiles the package with additional debug symbols enabled, useful
+for debugging with e.g. `gdb`.
+
+`doStrip drv`
+: Sets `doStrip` to `true` for `drv`.
+
+`dontStrip drv`
+: Sets `doStrip` to `false` for `drv`.
+
+<!-- TODO(@sternenseemann): shellAware -->
+
+##### Trivial Helpers {#haskell-trivial-helpers}
+
+`doJailbreak drv`
+: Sets the `jailbreak` argument to `true` for `drv`.
+
+`dontJailbreak drv`
+: Sets the `jailbreak` argument to `false` for `drv`.
+
+`doHaddock drv`
+: Sets `doHaddock` to `true` for `drv`.
+
+`dontHaddock drv`
+: Sets `doHaddock` to `false` for `drv`. Useful if the build of a package is
+failing because of e.g. a syntax error in the Haddock documentation.
+
+`doHyperlinkSource drv`
+: Sets `hyperlinkSource` to `true` for `drv`.
+
+`dontHyperlinkSource drv`
+: Sets `hyperlinkSource` to `false` for `drv`.
+
+`doCheck drv`
+: Sets `doCheck` to `true` for `drv`.
+
+`dontCheck drv`
+: Sets `doCheck` to `false` for `drv`. Useful if a package has a broken,
+flaky or otherwise problematic test suite breaking the build.
+
+`dontCheckIf condition drv`
+: Sets `doCheck` to `false` for `drv`, but only if `condition` applies.
+Otherwise it's a no-op. Useful to conditionally disable tests for a package
+without interfering with previous overrides or default values.
+
+<!-- Purposefully omitting the non-list variants here. They are a bit
+ugly, and we may want to deprecate them at some point. -->
+
+`appendConfigureFlags list drv`
+: Adds the strings in `list` to the `configureFlags` argument for `drv`.
+
+`enableCabalFlag flag drv`
+: Makes sure that the Cabal flag `flag` is enabled in Cabal's configure step.
+
+`disableCabalFlag flag drv`
+: Makes sure that the Cabal flag `flag` is disabled in Cabal's configure step.
+
+`appendBuildFlags list drv`
+: Adds the strings in `list` to the `buildFlags` argument for `drv`.
+
+<!-- TODO(@sternenseemann): removeConfigureFlag -->
+
+`appendPatches list drv`
+: Adds the `list` of derivations or paths to the `patches` argument for `drv`.
+
+<!-- TODO(@sternenseemann): link dep section -->
+
+`addBuildTools list drv`
+: Adds the `list` of derivations to the `buildTools` argument for `drv`.
+
+`addExtraLibraries list drv`
+: Adds the `list` of derivations to the `extraLibraries` argument for `drv`.
+
+`addBuildDepends list drv`
+: Adds the `list` of derivations to the `buildDepends` argument for `drv`.
+
+`addTestToolDepends list drv`
+: Adds the `list` of derivations to the `testToolDepends` argument for `drv`.
+
+`addPkgconfigDepends list drv`
+: Adds the `list` of derivations to the `pkg-configDepends` argument for `drv`.
+
+`addSetupDepends list drv`
+: Adds the `list` of derivations to the `setupHaskellDepends` argument for `drv`.
+
+`doBenchmark drv`
+: Set `doBenchmark` to `true` for `drv`. Useful if your development
+environment is missing the dependencies necessary for compiling the
+benchmark component.
+
+`dontBenchmark drv`
+: Set `doBenchmark` to `false` for `drv`.
+
+`setBuildTargets drv list`
+: Sets the `buildTarget` argument for `drv` so that the targets specified in `list` are built.
+
+`doCoverage drv`
+: Sets the `doCoverage` argument to `true` for `drv`.
+
+`dontCoverage drv`
+: Sets the `doCoverage` argument to `false` for `drv`.
+
+`enableExecutableProfiling drv`
+: Sets the `enableExecutableProfiling` argument to `true` for `drv`.
+
+`disableExecutableProfiling drv`
+: Sets the `enableExecutableProfiling` argument to `false` for `drv`.
+
+`enableLibraryProfiling drv`
+: Sets the `enableLibraryProfiling` argument to `true` for `drv`.
+
+`disableLibraryProfiling drv`
+: Sets the `enableLibraryProfiling` argument to `false` for `drv`.
+
+#### Library functions in the Haskell package sets {#haskell-package-set-lib-functions}
+
+Some library functions depend on packages from the Haskell package sets. Thus they are
+exposed from those instead of from `haskell.lib.compose` which can only access what is
+passed directly to it. When using the functions below, make sure that you are obtaining them
+from the same package set (`haskellPackages`, `haskell.packages.ghc944` etc.) as the packages
+you are working with or – even better – from the `self`/`final` fix point of your overlay to
+`haskellPackages`.
+
+Note: Some functions like `shellFor` that are not intended for overriding per se are omitted
+in this section. <!-- TODO(@sternenseemann): note about ifd section -->
+
+`cabalSdist { src, name ? ... }`
+: Generates the Cabal sdist tarball for `src`, suitable for uploading to Hackage.
+Contrary to `haskell.lib.compose.sdistTarball`, it uses `cabal-install` over `Setup.hs`,
+so it is usually faster: No build dependencies need to be downloaded, and we can
+skip compiling `Setup.hs`.
+
+`buildFromCabalSdist drv`
+: Build `drv`, but run its `src` attribute through `cabalSdist` first. Useful for catching
+files necessary for compilation that are missing from the sdist.
+
+`generateOptparseApplicativeCompletions list drv`
+: Generate and install shell completion files for the installed executables whose
+names are given via `list`. The executables need to be using `optparse-applicative`
+for [this to work][optparse-applicative-completions].
+Note that this feature is automatically disabled when cross-compiling, since it
+requires executing the binaries in question.
+
+<!--
+
+TODO(@NixOS/haskell): finish these planned sections
+### Overriding the entire package set
+
+
+## Import-from-Derivation helpers
+
+* `callCabal2nix`
+* `callHackage`, `callHackageDirect`
+* `developPackage`
+
+## Contributing {#haskell-contributing}
+
+### Fixing a broken package {#haskell-fixing-a-broken-package}
+
+### Package set generation {#haskell-package-set-generation}
+
+### Packaging a Haskell project
+
+### Backporting {#haskell-backporting}
+
+Backporting changes to a stable NixOS version in general is covered
+in nixpkgs' `CONTRIBUTING.md` in general. In particular refer to the
+[backporting policy](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#criteria-for-backporting-changes)
+to check if the change you have in mind may be backported.
+
+This section focuses on how to backport a package update (e.g. a
+bug fix or security release). Fixing a broken package works like
+it does for the unstable branches.
+
+-->
+
+## F.A.Q. {#haskell-faq}
+
+### Why is topic X not covered in this section? Why is section Y missing? {#haskell-why-not-covered}
+
+We have been working on [moving the nixpkgs Haskell documentation back into the
+nixpkgs manual](https://github.com/NixOS/nixpkgs/issues/121403). Since this
+process has not been completed yet, you may find some topics missing here that
+are covered in the old [haskell4nix docs](https://haskell4nix.readthedocs.io/).
+
+If you feel any important topic is not documented at all, feel free to comment
+on the issue linked above.
+
+### How to enable or disable profiling builds globally? {#haskell-faq-override-profiling}
+
+By default, Nixpkgs builds a profiling version of each Haskell library. The
+exceptions to this rule are some platforms where it is disabled due to concerns
+over output size. You may want to…
+
+* …enable profiling globally so that you can build a project you are working on
+  with profiling ability giving you insight in the time spent across your code
+  and code you depend on using [GHC's profiling feature][profiling].
+
+* …disable profiling (globally) to reduce the time spent building the profiling
+  versions of libraries, which account for a significant amount of build time
+  (although they are not as expensive as the “normal” build of a Haskell library).
+
+::: {.note}
+The method described below affects the build of all libraries in the
+respective Haskell package set as well as GHC. If your choices differ from
+Nixpkgs' default for your (host) platform, you will lose the ability to
+substitute from the official binary cache.
+
+If you are concerned about build times and thus want to disable profiling, it
+probably makes sense to use `haskell.lib.compose.disableLibraryProfiling` (see
+[](#haskell-trivial-helpers)) on the packages you are building locally while
+continuing to substitute their dependencies and GHC.
+:::
+
+Since we need to change the profiling settings for the desired Haskell package
+set _and_ GHC (as the core libraries like `base`, `filepath` etc. are bundled
+with GHC), it is recommended to use overlays for Nixpkgs to change them.
+Since the interrelated parts, i.e. the package set and GHC, are connected
+via the Nixpkgs fixpoint, we need to modify them both in a way that preserves
+their connection (or else we'd have to wire it up again manually). This is
+achieved by changing GHC and the package set in separate overlays to prevent
+the package set from pulling in GHC from `prev`.
+
+The result is two overlays like the ones shown below. Adjustable parts are
+annotated with comments, as are any optional or alternative ways to achieve
+the desired profiling settings without causing too many rebuilds.
+
+<!-- TODO(@sternenseemann): buildHaskellPackages != haskellPackages with this overlay,
+affected by https://github.com/NixOS/nixpkgs/issues/235960 which needs to be fixed
+properly still.
+-->
+
+```nix
+let
+  # Name of the compiler and package set you want to change. If you are using
+  # the default package set `haskellPackages`, you need to look up what version
+  # of GHC it currently uses (note that this is subject to change).
+  ghcName = "ghc92";
+  # Desired new setting
+  enableProfiling = true;
+in
+
+[
+  # The first overlay modifies the GHC derivation so that it does or does not
+  # build profiling versions of the core libraries bundled with it. It is
+  # recommended to only use such an overlay if you are enabling profiling on a
+  # platform that doesn't by default, because compiling GHC from scratch is
+  # quite expensive.
+  (final: prev:
+  let
+    inherit (final) lib;
+  in
+
+  {
+    haskell = prev.haskell // {
+      compiler = prev.haskell.compiler // {
+        ${ghcName} = prev.haskell.compiler.${ghcName}.override {
+          # Unfortunately, the GHC setting is named differently for historical reasons
+          enableProfiledLibs = enableProfiling;
+        };
+      };
+    };
+  })
+
+  (final: prev:
+  let
+    inherit (final) lib;
+    haskellLib = final.haskell.lib.compose;
+  in
+
+  {
+    haskell = prev.haskell // {
+      packages = prev.haskell.packages // {
+        ${ghcName} = prev.haskell.packages.${ghcName}.override {
+          overrides = hfinal: hprev: {
+            mkDerivation = args: hprev.mkDerivation (args // {
+              # Since we are forcing our ideas upon mkDerivation, this change will
+              # affect every package in the package set.
+              enableLibraryProfiling = enableProfiling;
+
+              # To actually use profiling on an executable, executable profiling
+              # needs to be enabled for the executable you want to profile. You
+              # can either do this globally or…
+              enableExecutableProfiling = enableProfiling;
+            });
+
+            # …only for the package that contains an executable you want to profile.
+            # That saves on unnecessary rebuilds for packages that you only depend
+            # on for their library, but also contain executables (e.g. pandoc).
+            my-executable = haskellLib.enableExecutableProfiling hprev.my-executable;
+
+            # If you are disabling profiling to save on build time but want to
+            # retain the ability to substitute from the binary cache, drop the
+            # override for mkDerivation above and instead add an override like
+            # this for the specific packages you are building locally and want
+            # to make cheaper to build.
+            my-library = haskellLib.disableLibraryProfiling hprev.my-library;
+          };
+        };
+      };
+    };
+  })
+]
+```
+
+<!-- TODO(@sternenseemann): write overriding mkDerivation, overriding GHC, and
+overriding the entire package set sections and link to them from here where
+relevant.
+-->
+
+[Stackage]: https://www.stackage.org
+[cabal-project-files]: https://cabal.readthedocs.io/en/latest/cabal-project.html
+[cabal2nix]: https://github.com/nixos/cabal2nix
+[cpphs]: https://hackage.haskell.org/package/cpphs
+[haddock-hoogle-option]: https://haskell-haddock.readthedocs.io/en/latest/invoking.html#cmdoption-hoogle
+[haddock-hyperlinked-source-option]: https://haskell-haddock.readthedocs.io/en/latest/invoking.html#cmdoption-hyperlinked-source
+[haddock]: https://www.haskell.org/haddock/
+[haskell-program-coverage]: https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/profiling.html#observing-code-coverage
+[haskell.nix]: https://input-output-hk.github.io/haskell.nix/index.html
+[HLS user guide]: https://haskell-language-server.readthedocs.io/en/latest/configuration.html#configuring-your-editor
+[hoogle]: https://wiki.haskell.org/Hoogle
+[incremental-builds]: https://www.haskellforall.com/2022/12/nixpkgs-support-for-incremental-haskell.html
+[jailbreak-cabal]: https://github.com/NixOS/jailbreak-cabal/
+[multiple-outputs]: https://nixos.org/manual/nixpkgs/stable/#chap-multiple-output
+[optparse-applicative-completions]: https://github.com/pcapriotti/optparse-applicative/blob/7726b63796aa5d0df82e926d467f039b78ca09e2/README.md#bash-zsh-and-fish-completions
+[profiling-detail]: https://cabal.readthedocs.io/en/latest/cabal-project.html#cfg-field-profiling-detail
+[profiling]: https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/profiling.html
+[search.nixos.org]: https://search.nixos.org
+[turtle]: https://hackage.haskell.org/package/turtle
diff --git a/nixpkgs/doc/languages-frameworks/hy.section.md b/nixpkgs/doc/languages-frameworks/hy.section.md
new file mode 100644
index 000000000000..49309e4819f5
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/hy.section.md
@@ -0,0 +1,31 @@
+# Hy {#sec-language-hy}
+
+## Installation {#ssec-hy-installation}
+
+### Installation without packages {#installation-without-packages}
+
+You can install `hy` via `nix-env` or by adding it to your `configuration.nix` by referring to it as the `hy` attribute. This kind of installation adds `hy` to your environment and it works with the default `python3`.
+
+::: {.caution}
+Packages that are installed with your Python derivation are not accessible to `hy` this way.
+:::
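+
+For example, a minimal sketch of making `hy` available system-wide via `configuration.nix`:
+
+```nix
+{ pkgs, ... }:
+
+{
+  environment.systemPackages = [ pkgs.hy ];
+}
+```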
+
+### Installation with packages {#installation-with-packages}
+
+Creating a `hy` derivation with custom Python packages is straightforward, and works much like it does for Python itself. The `hy` attribute provides a `withPackages` function that creates a custom `hy` derivation with the specified packages.
+
+For example, if you want to create a shell with `matplotlib` and `numpy`, you can do so like this:
+
+```ShellSession
+$ nix-shell -p "hy.withPackages (ps: with ps; [ numpy matplotlib ])"
+```
+
+Or if you want to extend your `configuration.nix`:
+
+```nix
+{ # ...
+
+  environment.systemPackages = with pkgs; [
+    (hy.withPackages (py-packages: with py-packages; [ numpy matplotlib ]))
+  ];
+}
+```
diff --git a/nixpkgs/doc/languages-frameworks/idris.section.md b/nixpkgs/doc/languages-frameworks/idris.section.md
new file mode 100644
index 000000000000..0fa828825749
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/idris.section.md
@@ -0,0 +1,143 @@
+# Idris {#idris}
+
+## Installing Idris {#installing-idris}
+
+The easiest way to get a working Idris version is to install the `idris` attribute:
+
+```ShellSession
+$ nix-env -f "<nixpkgs>" -iA idris
+```
+
+This, however, only provides the `prelude` and `base` libraries. To install Idris with additional libraries, you can use the `idrisPackages.with-packages` function, e.g. in an overlay in `~/.config/nixpkgs/overlays/my-idris.nix`:
+
+```nix
+self: super: {
+  myIdris = with self.idrisPackages; with-packages [ contrib pruviloj ];
+}
+```
+
+And then:
+
+```ShellSession
+$ # On NixOS
+$ nix-env -iA nixos.myIdris
+$ # On non-NixOS
+$ nix-env -iA nixpkgs.myIdris
+```
+
+To see all available Idris packages:
+
+```ShellSession
+$ # On NixOS
+$ nix-env -qaPA nixos.idrisPackages
+$ # On non-NixOS
+$ nix-env -qaPA nixpkgs.idrisPackages
+```
+
+Similarly, entering a `nix-shell`:
+
+```ShellSession
+$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
+```
+
+## Starting Idris with library support {#starting-idris-with-library-support}
+
+To have access to these libraries in Idris, call it with the argument `-p <library name>` for each library:
+
+```ShellSession
+$ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
+[nix-shell:~]$ idris -p contrib -p pruviloj
+```
+
+A listing of all available packages the Idris binary has access to is available via `--listlibs`:
+
+```ShellSession
+$ idris --listlibs
+00prelude-idx.ibc
+pruviloj
+base
+contrib
+prelude
+00pruviloj-idx.ibc
+00base-idx.ibc
+00contrib-idx.ibc
+```
+
+## Building an Idris project with Nix {#building-an-idris-project-with-nix}
+
+As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:
+
+```nix
+{ lib
+, build-idris-package
+, fetchFromGitHub
+, contrib
+, lightyear
+}:
+build-idris-package {
+  name = "yaml";
+  version = "2018-01-25";
+
+  # This is the .ipkg file that should be built; it defaults to the package name.
+  # In this case it should build `Yaml.ipkg` instead of `yaml.ipkg`.
+  # This is only necessary because the yaml package's ipkg file
+  # differs from its package name here.
+  ipkgName = "Yaml";
+  # Idris dependencies to provide for the build
+  idrisDeps = [ contrib lightyear ];
+
+  src = fetchFromGitHub {
+    owner = "Heather";
+    repo = "Idris.Yaml";
+    rev = "5afa51ffc839844862b8316faba3bafa15656db4";
+    hash = "sha256-h28F9EEPuvab6zrfeE+0k1XGQJGwINnsJEG8yjWIl7w=";
+  };
+
+  meta = {
+    description = "Idris YAML lib";
+    homepage = "https://github.com/Heather/Idris.Yaml";
+    license = lib.licenses.mit;
+    maintainers = [ lib.maintainers.brainrape ];
+  };
+}
+```
+
+Assuming this file is saved as `yaml.nix`, it's buildable using
+
+```ShellSession
+$ nix-build -E '(import <nixpkgs> {}).idrisPackages.callPackage ./yaml.nix {}'
+```
+
+Or it's possible to use
+
+```nix
+with import <nixpkgs> {};
+
+{
+  yaml = idrisPackages.callPackage ./yaml.nix {};
+}
+```
+
+in another file (say `default.nix`) to be able to build it with
+
+```ShellSession
+$ nix-build -A yaml
+```
+
+## Passing options to `idris` commands {#passing-options-to-idris-commands}
+
+The `build-idris-package` function also provides optional input values to set additional options for the `idris` commands used.
+
+Specifically, you can set `idrisBuildOptions`, `idrisTestOptions`, `idrisInstallOptions` and `idrisDocOptions` to provide additional options to the `idris` command respectively when building, testing, installing and generating docs for your package.
+
+For example, you could set
+
+```nix
+build-idris-package {
+  idrisBuildOptions = [ "--log" "1" "--verbose" ];
+
+  # ...
+}
+```
+
+to require verbose output during the `idris` build phase.
diff --git a/nixpkgs/doc/languages-frameworks/idris2.section.md b/nixpkgs/doc/languages-frameworks/idris2.section.md
new file mode 100644
index 000000000000..f1f0277cc609
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/idris2.section.md
@@ -0,0 +1,47 @@
+# Idris2 {#sec-idris2}
+
+In addition to exposing the Idris2 compiler itself, Nixpkgs exposes an `idris2Packages.buildIdris` helper to make it a bit more ergonomic to build Idris2 executables or libraries.
+
+The `buildIdris` function takes an attribute set that defines at a minimum the `src` and `ipkgName` of the package to be built and any `idrisLibraries` required to build it. The `src` is the same source you're familiar with and the `ipkgName` must be the name of the `ipkg` file for the project (omitting the `.ipkg` extension). The `idrisLibraries` is a list of other library derivations created with `buildIdris`. You can optionally specify other derivation properties as needed but sensible defaults for `configurePhase`, `buildPhase`, and `installPhase` are provided.
+
+Importantly, `buildIdris` does not create a single derivation but rather an attribute set with two properties: `executable` and `library`. The `executable` property is a derivation and the `library` property is a function that will return a derivation for the library with or without source code included. Source code need not be included unless you are aiming to use IDE or LSP features that are able to jump to definitions within an editor.
+
+A simple example of a fully packaged library would be the [`LSP-lib`](https://github.com/idris-community/LSP-lib) found in the `idris-community` GitHub organization.
+```nix
+{ fetchFromGitHub, idris2Packages }:
+let lspLibPkg = idris2Packages.buildIdris {
+  ipkgName = "lsp-lib";
+  src = fetchFromGitHub {
+   owner = "idris-community";
+   repo = "LSP-lib";
+   rev = "main";
+   hash = "sha256-EvSyMCVyiy9jDZMkXQmtwwMoLaem1GsKVFqSGNNHHmY=";
+  };
+  idrisLibraries = [ ];
+};
+in lspLibPkg.library
+```
+
+The above evaluates to the `library` function described earlier; calling it (as shown in the next example) yields a derivation with the installed library, optionally including its source code.
+
+A slightly more involved example of a fully packaged executable would be the [`idris2-lsp`](https://github.com/idris-community/idris2-lsp) package, which is an Idris2 language server that uses the `LSP-lib` library from above.
+```nix
+{ callPackage, fetchFromGitHub, idris2Packages }:
+
+# Assuming the previous example lives in `lsp-lib.nix`:
+let lspLib = callPackage ./lsp-lib.nix { };
+    lspPkg = idris2Packages.buildIdris {
+      ipkgName = "idris2-lsp";
+      src = fetchFromGitHub {
+         owner = "idris-community";
+         repo = "idris2-lsp";
+         rev = "main";
+         hash = "sha256-vQTzEltkx7uelDtXOHc6QRWZ4cSlhhm5ziOqWA+aujk=";
+      };
+      idrisLibraries = [(idris2Packages.idris2Api { }) (lspLib { })];
+    };
+in lspPkg.executable
+```
+
+The above uses the default value of `withSource = false` for both required Idris libraries that the `idris2-lsp` executable depends on. `idris2Api` in the above derivation is provided by `idris2Packages` itself; this library exposes many of the otherwise internal APIs of the Idris2 compiler.
+
diff --git a/nixpkgs/doc/languages-frameworks/index.md b/nixpkgs/doc/languages-frameworks/index.md
new file mode 100644
index 000000000000..67107fb5b687
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/index.md
@@ -0,0 +1,47 @@
+# Languages and frameworks {#chap-language-support}
+
+The [standard build environment](#chap-stdenv) makes it easy to build typical Autotools-based packages with very little code. Any other kind of package can be accommodated by overriding the appropriate phases of `stdenv`. However, there are specialised functions in Nixpkgs to easily build packages for other programming languages, such as Perl or Haskell. These are described in this chapter.
+
+```{=include=} sections
+agda.section.md
+android.section.md
+beam.section.md
+bower.section.md
+chicken.section.md
+coq.section.md
+crystal.section.md
+cuda.section.md
+cuelang.section.md
+dart.section.md
+dhall.section.md
+dotnet.section.md
+emscripten.section.md
+gnome.section.md
+go.section.md
+haskell.section.md
+hy.section.md
+idris.section.md
+idris2.section.md
+ios.section.md
+java.section.md
+javascript.section.md
+julia.section.md
+lisp.section.md
+lua.section.md
+maven.section.md
+nim.section.md
+ocaml.section.md
+octave.section.md
+perl.section.md
+php.section.md
+pkg-config.section.md
+python.section.md
+qt.section.md
+r.section.md
+ruby.section.md
+rust.section.md
+swift.section.md
+texlive.section.md
+titanium.section.md
+vim.section.md
+```
diff --git a/nixpkgs/doc/languages-frameworks/ios.section.md b/nixpkgs/doc/languages-frameworks/ios.section.md
new file mode 100644
index 000000000000..eb8e2ca55326
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/ios.section.md
@@ -0,0 +1,225 @@
+# iOS {#ios}
+
+This component is basically a wrapper/workaround that makes it possible to
+expose an Xcode installation as a Nix package by means of symlinking to the
+relevant executables on the host system.
+
+Since Xcode can't be packaged with Nix, nor can we publish it as a Nix package
+(because of its license), this is basically the only integration strategy
+making it possible to do iOS application builds that integrate with other
+components of the Nix ecosystem.
+
+The primary objective of this project is to use the Nix expression language to
+specify how iOS apps can be built from source code, and to automatically spawn
+iOS simulator instances for testing.
+
+This component also makes it possible to use [Hydra](https://nixos.org/hydra),
+the Nix-based continuous integration server, to regularly build iOS apps and to
+do wireless ad-hoc installations of enterprise IPAs on iOS devices through
+Hydra.
+
+The Xcode build environment implements a number of features.
+
+## Deploying a proxy component wrapper exposing Xcode {#deploying-a-proxy-component-wrapper-exposing-xcode}
+
+The first use case is deploying a Nix package that provides symlinks to the Xcode
+installation on the host system. This package can be used as a build input to
+any build function implemented in the Nix expression language that requires
+Xcode.
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+
+  xcodeenv = import ./xcodeenv {
+    inherit (pkgs) stdenv;
+  };
+in
+xcodeenv.composeXcodeWrapper {
+  version = "9.2";
+  xcodeBaseDir = "/Applications/Xcode.app";
+}
+```
+
+By deploying the above expression with `nix-build` and inspecting its contents,
+you will notice that several Xcode-related executables are exposed as a Nix
+package:
+
+```bash
+$ ls -l result/bin
+lrwxr-xr-x  1 sander  staff  94  1 jan  1970 Simulator -> /Applications/Xcode.app/Contents/Developer/Applications/Simulator.app/Contents/MacOS/Simulator
+lrwxr-xr-x  1 sander  staff  17  1 jan  1970 codesign -> /usr/bin/codesign
+lrwxr-xr-x  1 sander  staff  17  1 jan  1970 security -> /usr/bin/security
+lrwxr-xr-x  1 sander  staff  21  1 jan  1970 xcode-select -> /usr/bin/xcode-select
+lrwxr-xr-x  1 sander  staff  61  1 jan  1970 xcodebuild -> /Applications/Xcode.app/Contents/Developer/usr/bin/xcodebuild
+lrwxr-xr-x  1 sander  staff  14  1 jan  1970 xcrun -> /usr/bin/xcrun
+```
+
+## Building an iOS application {#building-an-ios-application}
+
+We can build an iOS app executable for the simulator, or an IPA/xcarchive file
+for release purposes, e.g. ad-hoc, enterprise or store installations, by
+executing the `xcodeenv.buildApp {}` function:
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+
+  xcodeenv = import ./xcodeenv {
+    inherit (pkgs) stdenv;
+  };
+in
+xcodeenv.buildApp {
+  name = "MyApp";
+  src = ./myappsources;
+  sdkVersion = "11.2";
+
+  target = null; # Corresponds to the name of the app by default
+  configuration = null; # Release for release builds, Debug for debug builds
+  scheme = null; # -scheme will correspond to the app name by default
+  sdk = null; # null will set it to `iphonesimulator` for simulator builds or `iphoneos` for release builds
+  xcodeFlags = "";
+
+  release = true;
+  certificateFile = ./mycertificate.p12;
+  certificatePassword = "secret";
+  provisioningProfile = ./myprovisioning.profile;
+  signMethod = "ad-hoc"; # 'enterprise' or 'store'
+  generateIPA = true;
+  generateXCArchive = false;
+
+  enableWirelessDistribution = true;
+  installURL = "/installipa.php";
+  bundleId = "mycompany.myapp";
+  appVersion = "1.0";
+
+  # Supports all xcodewrapper parameters as well
+  xcodeBaseDir = "/Applications/Xcode.app";
+}
+```
+
+The above function takes a variety of parameters:
+
+* The `name` and `src` parameters are mandatory and specify the name of the app
+  and the location where the source code resides.
+* `sdkVersion` specifies which version of the iOS SDK to use.
+
+It is also possible to adjust the `xcodebuild` parameters. This is only needed in
+rare circumstances. In most cases, the default values should suffice:
+
+* The `target` parameter specifies which `xcodebuild` target to build. By default,
+  it takes the target that has the same name as the app.
+* The `configuration` parameter can be overridden if desired. By default, it
+  will do a debug build for the simulator and a release build for real devices.
+* The `scheme` parameter specifies which `-scheme` parameter to propagate to
+  `xcodebuild`. By default, it corresponds to the app name.
+* The `sdk` parameter specifies which SDK to use. By default, it picks
+  `iphonesimulator` for simulator builds and `iphoneos` for release builds.
+* The `xcodeFlags` parameter specifies arbitrary command line parameters that
+  should be propagated to `xcodebuild`.
+
+By default, builds are carried out for the iOS simulator. To do release builds
+(builds for real iOS devices), you must set the `release` parameter to `true`.
+In addition, you need to set the following parameters:
+
+* `certificateFile` refers to a P12 certificate file.
+* `certificatePassword` specifies the password of the P12 certificate.
+* `provisioningProfile` refers to the provisioning profile needed to sign the app.
+* `signMethod` should refer to `ad-hoc` for signing the app with an ad-hoc
+  certificate, `enterprise` for enterprise certificates and `app-store` for App
+  Store certificates.
+* `generateIPA` specifies that we want to produce an IPA file (this is probably
+  what you want).
+* `generateXCArchive` specifies that we want to produce an xcarchive file.
+
+When building IPA files on Hydra and when it is desired to allow iOS devices to
+install IPAs by browsing to the Hydra build products page, you can enable the
+`enableWirelessDistribution` parameter.
+
+When enabled, you need to configure the following options:
+
+* The `installURL` parameter refers to the URL of a PHP script that composes the
+  `itms-services://` URL allowing iOS devices to install the IPA file.
+* `bundleId` refers to the bundle ID value of the app
+* `appVersion` refers to the app's version number
+
+To use wireless ad-hoc distributions, you must also install the corresponding
+PHP script on a web server (see section: 'Installing the PHP script for wireless
+ad hoc installations from Hydra' for more information).
+
+In addition to the build parameters, you can also specify any parameters that
+the `xcodeenv.composeXcodeWrapper {}` function takes. For example, the
+`xcodeBaseDir` parameter can be overridden to refer to a different Xcode
+version.
+
+## Spawning simulator instances {#spawning-simulator-instances}
+
+In addition to building iOS apps, we can also automatically spawn simulator
+instances:
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+
+  xcodeenv = import ./xcodeenv {
+    inherit (pkgs) stdenv;
+  };
+in
+xcodeenv.simulateApp {
+  name = "simulate";
+
+  # Supports all xcodewrapper parameters as well
+  xcodeBaseDir = "/Applications/Xcode.app";
+}
+```
+
+The above expression produces a script that starts the simulator from the
+provided Xcode installation. The script can be started as follows:
+
+```bash
+./result/bin/run-test-simulator
+```
+
+By default, the script will show an overview of the UDIDs of all available simulator
+instances and ask you to pick one. You can also provide a UDID as a
+command-line parameter to launch an instance automatically:
+
+```bash
+./result/bin/run-test-simulator 5C93129D-CF39-4B1A-955F-15180C3BD4B8
+```
+
+You can also extend the simulator script to automatically deploy and launch an
+app in the requested simulator instance:
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+
+  xcodeenv = import ./xcodeenv {
+    inherit (pkgs) stdenv;
+  };
+in
+xcodeenv.simulateApp {
+  name = "simulate";
+  bundleId = "mycompany.myapp";
+  app = xcodeenv.buildApp {
+    # ...
+  };
+
+  # Supports all xcodewrapper parameters as well
+  xcodeBaseDir = "/Applications/Xcode.app";
+}
+```
+
+By providing the result of an `xcodeenv.buildApp {}` function and configuring the
+app bundle ID, the app is deployed and started automatically.
+
+## Troubleshooting {#troubleshooting}
+
+In some rare cases, it may happen that after a failure, changes are not picked
+up. Most likely, this is caused by a derived data cache that Xcode maintains.
+To wipe it you can run:
+
+```bash
+$ rm -rf ~/Library/Developer/Xcode/DerivedData
+```
diff --git a/nixpkgs/doc/languages-frameworks/java.section.md b/nixpkgs/doc/languages-frameworks/java.section.md
new file mode 100644
index 000000000000..6d56ffcd4503
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/java.section.md
@@ -0,0 +1,131 @@
+# Java {#sec-language-java}
+
+Ant-based Java packages are typically built from source as follows:
+
+```nix
+stdenv.mkDerivation {
+  pname = "...";
+  version = "...";
+
+  src = fetchurl { /* ... */ };
+
+  nativeBuildInputs = [
+    ant
+    jdk
+    stripJavaArchivesHook # removes timestamp metadata from jar files
+  ];
+
+  buildPhase = ''
+    runHook preBuild
+    ant # build the project using ant
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
+
+    # copy generated jar file(s) to an appropriate location in $out
+    install -Dm644 build/foo.jar $out/share/java/foo.jar
+
+    runHook postInstall
+  '';
+}
+```
+
+Note that `jdk` is an alias for the OpenJDK (self-built where available,
+or pre-built via Zulu). Platforms with OpenJDK not (yet) in Nixpkgs
+(`Aarch32`, `Aarch64`) point to the (unfree) `oraclejdk`.
+
+Also note that not using `stripJavaArchivesHook` will likely cause the
+generated `.jar` files to be non-deterministic, which is not optimal.
+Using it, however, does not always guarantee reproducibility.
+
+JAR files that are intended to be used by other packages should be
+installed in `$out/share/java`. JDKs have a stdenv setup hook that adds
+any JARs in the `share/java` directories of the build inputs to the
+`CLASSPATH` environment variable. For instance, if the package `libfoo`
+installs a JAR named `foo.jar` in its `share/java` directory, and
+another package declares the attribute
+
+```nix
+{
+  buildInputs = [ libfoo ];
+  nativeBuildInputs = [ jdk ];
+}
+```
+
+then `CLASSPATH` will be set to
+`/nix/store/...-libfoo/share/java/foo.jar`.
+
+Private JARs should be installed in a location like
+`$out/share/package-name`.
+
+If your Java package provides a program, you need to generate a wrapper
+script to run it using a JRE. You can use `makeWrapper` for this:
+
+```nix
+{
+  nativeBuildInputs = [ makeWrapper ];
+
+  installPhase = ''
+    mkdir -p $out/bin
+    makeWrapper ${jre}/bin/java $out/bin/foo \
+      --add-flags "-cp $out/share/java/foo.jar org.foo.Main"
+  '';
+}
+```
+
+Since the introduction of the Java Platform Module System in Java 9,
+Java distributions typically no longer ship with a general-purpose JRE:
+instead, they allow generating a JRE with only the modules required for
+your application(s). Because we can't predict what modules will be
+needed on a general-purpose system, the default jre package is the full
+JDK. When building a minimal system/image, you can override the
+`modules` parameter on `jre_minimal` to build a JRE with only the
+modules relevant for you:
+
+```nix
+let
+  my_jre = pkgs.jre_minimal.override {
+    modules = [
+      # The modules used by 'something' and 'other' combined:
+      "java.base"
+      "java.logging"
+    ];
+  };
+  something = (pkgs.something.override { jre = my_jre; });
+  other = (pkgs.other.override { jre = my_jre; });
+in
+  <...>
+```
+
+You can also specify what JDK your JRE should be based on, for example
+selecting a 'headless' build to avoid including a link to GTK+:
+
+```nix
+{
+  my_jre = pkgs.jre_minimal.override {
+    jdk = jdk11_headless;
+  };
+}
+```
+
+Note that all JDKs have a `home` passthru attribute, so if your application
+requires environment variables like `JAVA_HOME` to be set, that can be done in a
+generic fashion with the `--set` argument of `makeWrapper`:
+
+```bash
+--set JAVA_HOME ${jdk.home}
+```
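+
+Combined with the wrapper example above, this could look as follows (a sketch; the jar path and main class are placeholders, as before):
+
+```nix
+{
+  nativeBuildInputs = [ makeWrapper ];
+
+  installPhase = ''
+    mkdir -p $out/bin
+    # same wrapper as before, additionally exporting JAVA_HOME
+    makeWrapper ${jre}/bin/java $out/bin/foo \
+      --add-flags "-cp $out/share/java/foo.jar org.foo.Main" \
+      --set JAVA_HOME ${jdk.home}
+  '';
+}
+```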
+
+It is possible to use a different Java compiler than `javac` from the
+OpenJDK. For instance, to use the GNU Java Compiler:
+
+```nix
+{
+  nativeBuildInputs = [ gcj ant ];
+}
+```
+
+Here, Ant will automatically use `gij` (the GNU Java Runtime) instead of
+the OpenJRE.
diff --git a/nixpkgs/doc/languages-frameworks/javascript.section.md b/nixpkgs/doc/languages-frameworks/javascript.section.md
new file mode 100644
index 000000000000..b0da08d022c0
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/javascript.section.md
@@ -0,0 +1,446 @@
+# Javascript {#language-javascript}
+
+## Introduction {#javascript-introduction}
+
+This section contains instructions on how to package JavaScript applications.
+
+The various tools available will be listed in the [tools-overview](#javascript-tools-overview).
+Some general principles for packaging will follow.
+Finally, some tool-specific instructions will be given.
+
+## Getting unstuck / finding code examples {#javascript-finding-examples}
+
+If you find you are lacking inspiration for packaging javascript applications, the links below might prove useful.
+Searching online for prior art can be helpful if you are running into solved problems.
+
+### Github {#javascript-finding-examples-github}
+
+- Searching Nix files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+language%3ANix&type=code>
+- Searching just `flake.nix` files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+path%3A**%2Fflake.nix&type=code>
+
+### Gitlab {#javascript-finding-examples-gitlab}
+
+- Searching Nix files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+extension%3Anix>
+- Searching just `flake.nix` files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+filename%3Aflake.nix>
+
+## Tools overview {#javascript-tools-overview}
+
+## General principles {#javascript-general-principles}
+
+The following principles are given in order of importance with potential exceptions.
+
+### Try to use the same node version used upstream {#javascript-upstream-node-version}
+
+It is often not documented which node version is used upstream, but if it is, try to use the same version when packaging.
+
+This can be a problem if upstream is using the latest and greatest and you are trying to use an earlier version of node.
+Some cryptic errors regarding V8 may appear.
+
+### Try to respect the package manager originally used by upstream (and use the upstream lock file) {#javascript-upstream-package-manager}
+
+A lock file (package-lock.json, yarn.lock, ...) is what each tool uses to make installations of `node_modules` reproducible.
+
+Package manager guidelines recommend committing those lock files to the repository.
+If a particular lock file is present, it is a strong indication of which package manager is used upstream.
+
+It's better to use a Nix tool that understands the lock file.
+Using a different tool might give you hard-to-understand errors because different packages end up being installed.
+An example of the problems that can arise can be found [here](https://github.com/NixOS/nixpkgs/pull/126629):
+upstream uses NPM, but the linked PR attempts to package it with `yarn2nix` (which uses yarn.lock).
+
+Using a different tool forces you to commit a lock file to the repository.
+Those files are fairly large, so when packaging for nixpkgs, this approach does not scale well.
+
+Exceptions to this rule are:
+
+- When you encounter one of the bugs of a Nix tool. In each of the tool-specific instructions, known problems will be detailed. If you have a problem with a particular tool, then it's best to try another one, even if this means you will have to recreate a lock file and commit it to nixpkgs. In general, `yarn2nix` has fewer known problems, and a simple search in nixpkgs will reveal many committed yarn.lock files.
+- Some lock files contain a particular version of a package that has been pulled off NPM for some reason. In that case, you can recreate the upstream lock file (by removing the original and running `npm install`, `yarn`, ...) and commit it to nixpkgs.
+- The only tool that supports workspaces (a feature of NPM that helps manage sub-directories with different package.json from a single top level package.json) is `yarn2nix`. If upstream has workspaces you should try `yarn2nix`.
+
+### Try to use upstream package.json {#javascript-upstream-package-json}
+
+Exceptions to this rule are:
+
+- Sometimes the upstream repo assumes that some dependencies are installed globally. In that case, you can add them manually to the upstream package.json (`yarn add xxx` or `npm install xxx`, ...). Dependencies that are installed locally can be executed with `npx` for CLI tools (e.g. `npx postcss ...`; this is how you can call those dependencies in the phases).
+- Sometimes there is a version conflict between some dependency requirements. In that case you can fix a version by removing the `^`.
+- Sometimes the script defined in the package.json does not work as is. Some scripts, for example, use CLI tools that might not be available, or `cd` into a directory with a different package.json (notably for workspaces). In that case, it's perfectly fine to look at what the particular script is doing and break it down in the phases. For instance, a build script may call `build:*`, which in turn runs several other build scripts like `build:ui` or `build:server`. If one of those fails, you can try to separate them:
+
+  ```sh
+  yarn build:ui
+  yarn build:server
+  # OR
+  npm run build:ui
+  npm run build:server
+  ```
+
+  When you need to override a package.json, it's nice to use the one from the upstream source and apply explicit overrides on top. Here is an example:
+
+  ```nix
+  {
+    patchedPackageJSON = final.runCommand "package.json" { } ''
+      ${jq}/bin/jq '.version = "0.4.0" |
+        .devDependencies."@jsdoc/cli" = "^0.2.5"' \
+        ${sonar-src}/package.json > $out
+    '';
+  }
+  ```
+
+  You will still need to commit the modified version of the lock files, but at least the overrides are explicit for everyone to see.
+
+### Using node_modules directly {#javascript-using-node_modules}
+
+Each tool has an abstraction to just build the node_modules (dependencies) directory.
+You can always use the `stdenv.mkDerivation` with the node_modules to build the package (symlink the node_modules directory and then use the package build command).
+The node_modules abstraction can also be used to build some web framework frontends.
+For an example of this, see how [plausible](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/web-apps/plausible/default.nix) is built: it uses `mkYarnModules` to make the derivation containing node_modules.
+Then, when building the frontend, you can just symlink the node_modules directory.
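+
+A minimal sketch of this pattern, assuming `stdenv`, `nodejs` and `yarn` are in scope; `nodeModules` is a placeholder for a `mkYarnModules`-style derivation, and the build and install commands are placeholders for the project's real ones:
+
+```nix
+stdenv.mkDerivation {
+  pname = "my-frontend";
+  version = "0.1.0";
+  src = ./.;
+
+  nativeBuildInputs = [ nodejs yarn ];
+
+  buildPhase = ''
+    runHook preBuild
+    export HOME=$(mktemp -d)
+    # symlink the pre-built dependencies next to the sources
+    ln -s ${nodeModules}/node_modules node_modules
+    yarn --offline build
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
+    cp -r dist $out
+    runHook postInstall
+  '';
+}
+```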
+
+## Javascript packages inside nixpkgs {#javascript-packages-nixpkgs}
+
+The [pkgs/development/node-packages](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/node-packages) folder contains a generated collection of [NPM packages](https://npmjs.com/) that can be installed with the Nix package manager.
+
+As a rule of thumb, the package set should only provide _end user_ software packages, such as command-line utilities.
+Libraries should only be added to the package set if there is a non-NPM package that requires it.
+
+When it is desired to use NPM libraries in a development project, use the `node2nix` generator directly on the `package.json` configuration file of the project.
+
+The package set provides support for the official stable Node.js versions.
+The latest stable LTS release is available in `nodePackages`, and the latest stable current release in `nodePackages_latest`.
+
+If your package uses native addons, you need to examine what kind of native build system it uses. Here are some examples:
+
+- `node-gyp`
+- `node-gyp-builder`
+- `node-pre-gyp`
+
+After you have identified the correct system, you need to override your package expression while adding the build system as a build input.
+For example, `dat` requires `node-gyp-build`, so we override its expression in [pkgs/development/node-packages/overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/node-packages/overrides.nix):
+
+```nix
+  {
+    dat = prev.dat.override (oldAttrs: {
+      buildInputs = [ final.node-gyp-build pkgs.libtool pkgs.autoconf pkgs.automake ];
+      meta = oldAttrs.meta // { broken = since "12"; };
+    });
+  }
+```
+
+### Adding and Updating Javascript packages in nixpkgs {#javascript-adding-or-updating-packages}
+
+To add a package from NPM to nixpkgs:
+
+1. Modify [pkgs/development/node-packages/node-packages.json](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/node-packages/node-packages.json) to add, update or remove package entries to have it included in `nodePackages` and `nodePackages_latest`.
+2. Run the script:
+
+   ```sh
+   ./pkgs/development/node-packages/generate.sh
+   ```
+
+3. Build your new package to test your changes:
+
+   ```sh
+   nix-build -A nodePackages.<new-or-updated-package>
+   ```
+
+    To build against the latest stable Current Node.js version (e.g. 18.x):
+
+    ```sh
+    nix-build -A nodePackages_latest.<new-or-updated-package>
+    ```
+
+    If the package doesn't build, you may need to add an override as explained above.
+4. If the package's name doesn't match any of the executables it provides, add an entry in [pkgs/development/node-packages/main-programs.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/node-packages/main-programs.nix). This will be the case for all scoped packages, e.g., `@angular/cli`.
+5. Add and commit all modified and generated files.
+
+For more information about the generation process, consult the [README.md](https://github.com/svanderburg/node2nix) file of the `node2nix` tool.
+
+To update NPM packages in nixpkgs, run the same `generate.sh` script:
+
+```sh
+./pkgs/development/node-packages/generate.sh
+```
+
+#### Git protocol error {#javascript-git-error}
+
+Some packages may have Git dependencies from GitHub specified with `git://`.
+GitHub has [disabled unencrypted Git connections](https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git), so you may see the following error when running the generate script:
+
+```
+The unauthenticated git protocol on port 9418 is no longer supported
+```
+
+Use the following Git configuration to resolve the issue:
+
+```sh
+git config --global url."https://github.com/".insteadOf git://github.com/
+```
+
+## Tool specific instructions {#javascript-tool-specific}
+
+### buildNpmPackage {#javascript-buildNpmPackage}
+
+`buildNpmPackage` allows you to package npm-based projects in Nixpkgs without the use of an auto-generated dependencies file (as used in [node2nix](#javascript-node2nix)).
+It works by utilizing npm's cache functionality -- creating a reproducible cache that contains the dependencies of a project, and pointing npm to it.
+
+Here's an example:
+
+```nix
+{ lib, buildNpmPackage, fetchFromGitHub }:
+
+buildNpmPackage rec {
+  pname = "flood";
+  version = "4.7.0";
+
+  src = fetchFromGitHub {
+    owner = "jesec";
+    repo = pname;
+    rev = "v${version}";
+    hash = "sha256-BR+ZGkBBfd0dSQqAvujsbgsEPFYw/ThrylxUbOksYxM=";
+  };
+
+  npmDepsHash = "sha256-tuEfyePwlOy2/mOPdXbqJskO6IowvAP4DWg8xSZwbJw=";
+
+  # The prepack script runs the build script, which we'd rather do in the build phase.
+  npmPackFlags = [ "--ignore-scripts" ];
+
+  NODE_OPTIONS = "--openssl-legacy-provider";
+
+  meta = {
+    description = "A modern web UI for various torrent clients with a Node.js backend and React frontend";
+    homepage = "https://flood.js.org";
+    license = lib.licenses.gpl3Only;
+    maintainers = with lib.maintainers; [ winter ];
+  };
+}
+```
+
+The default `installPhase` set by `buildNpmPackage` uses `npm pack --json --dry-run` to decide what files to install in `$out/lib/node_modules/$name/`, where `$name` is the `name` string defined in the package's `package.json`.
+Additionally, the `bin` and `man` keys in the source's `package.json` are used to decide what binaries and manpages are supposed to be installed.
+If these are not defined, `npm pack` may miss some files, and no binaries will be produced.
+
+#### Arguments {#javascript-buildNpmPackage-arguments}
+
+* `npmDepsHash`: The output hash of the dependencies for this project. Can be calculated in advance with [`prefetch-npm-deps`](#javascript-buildNpmPackage-prefetch-npm-deps).
+* `makeCacheWritable`: Whether to make the cache writable prior to installing dependencies. Don't set this unless npm tries to write to the cache directory, as it can slow down the build.
+* `npmBuildScript`: The script to run to build the project. Defaults to `"build"`.
+* `npmWorkspace`: The workspace directory within the project to build and install.
+* `dontNpmBuild`: Option to disable running the build script. Set to `true` if the package does not have a build script. Defaults to `false`. Alternatively, setting `buildPhase` explicitly also disables this.
+* `dontNpmInstall`: Option to disable running `npm install`. Defaults to `false`. Alternatively, setting `installPhase` explicitly also disables this.
+* `npmFlags`: Flags to pass to all npm commands.
+* `npmInstallFlags`: Flags to pass to `npm ci`.
+* `npmBuildFlags`: Flags to pass to `npm run ${npmBuildScript}`.
+* `npmPackFlags`: Flags to pass to `npm pack`.
+* `npmPruneFlags`: Flags to pass to `npm prune`. Defaults to the value of `npmInstallFlags`.
+* `makeWrapperArgs`: Flags to pass to `makeWrapper`. They are added to the wrapper executables that call the generated `.js` scripts with `node` as the interpreter. These scripts are defined in `package.json`.
+* `nodejs`: The `nodejs` package to build against, using the corresponding `npm` shipped with that version of `node`. Defaults to `pkgs.nodejs`.
+* `npmDeps`: The dependencies used to build the npm package. Especially useful to not have to recompute workspace dependencies.
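+
+For instance, a hedged sketch of setting a few of these arguments for a hypothetical workspace package (all names and the hash are placeholders):
+
+```nix
+{ buildNpmPackage }:
+
+buildNpmPackage {
+  pname = "my-workspace-tool"; # hypothetical
+  version = "0.1.0";
+  src = ./.;
+
+  # placeholder hash; compute the real one, e.g. with prefetch-npm-deps
+  npmDepsHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+
+  # build and install only the `packages/cli` workspace (hypothetical layout)
+  npmWorkspace = "packages/cli";
+  # hypothetical script name from package.json
+  npmBuildScript = "compile";
+}
+```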
+
+#### prefetch-npm-deps {#javascript-buildNpmPackage-prefetch-npm-deps}
+
+`prefetch-npm-deps` is a Nixpkgs package that calculates the hash of the dependencies of an npm project ahead of time.
+
+```console
+$ ls
+package.json package-lock.json index.js
+$ prefetch-npm-deps package-lock.json
+...
+sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+```
+
+#### fetchNpmDeps {#javascript-buildNpmPackage-fetchNpmDeps}
+
+`fetchNpmDeps` is a Nix function that requires the following mandatory arguments:
+
+- `src`: A directory / tarball with a `package-lock.json` file
+- `hash`: The output hash of the node dependencies defined in `package-lock.json`.
+
+It returns a derivation with all `package-lock.json` dependencies downloaded into `$out/`, usable as an npm cache.
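+
+A minimal sketch of calling it (the hash is a placeholder):
+
+```nix
+{ fetchNpmDeps }:
+
+fetchNpmDeps {
+  # directory containing package.json and package-lock.json
+  src = ./.;
+  # placeholder; replace with the real output hash
+  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+}
+```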
+
+#### importNpmLock {#javascript-buildNpmPackage-importNpmLock}
+
+`importNpmLock` is a Nix function that accepts the following optional arguments:
+
+- `npmRoot`: Path to package directory containing the source tree
+- `package`: Parsed contents of `package.json`
+- `packageLock`: Parsed contents of `package-lock.json`
+- `pname`: Package name
+- `version`: Package version
+
+It returns a derivation with a patched `package.json` & `package-lock.json` with all dependencies resolved to Nix store paths.
+
+This function is analogous to using `fetchNpmDeps`, but instead of specifying `hash` it uses metadata from `package.json` & `package-lock.json`.
+
+Note that `npmHooks.npmConfigHook` cannot be used with `importNpmLock`. You will instead need to use `importNpmLock.npmConfigHook`:
+
+```nix
+{ buildNpmPackage, importNpmLock }:
+
+buildNpmPackage {
+  pname = "hello";
+  version = "0.1.0";
+
+  npmDeps = importNpmLock {
+    npmRoot = ./.;
+  };
+
+  npmConfigHook = importNpmLock.npmConfigHook;
+}
+```
+
+### corepack {#javascript-corepack}
+
+This package puts the corepack wrappers for pnpm and yarn in your PATH, and they will honor the `packageManager` setting in the `package.json`.
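+
+For example, a minimal sketch of a development shell that relies on it (assuming the project's `package.json` declares a `packageManager` field):
+
+```nix
+{ pkgs ? import <nixpkgs> { } }:
+
+pkgs.mkShell {
+  # corepack provides the yarn/pnpm shims; nodejs provides node itself
+  packages = [ pkgs.corepack pkgs.nodejs ];
+}
+```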
+
+### node2nix {#javascript-node2nix}
+
+#### Preparation {#javascript-node2nix-preparation}
+
+You will need to generate a Nix expression for the dependencies. Don't forget `-l package-lock.json` if there is a lock file. Most probably you will need the `--development` flag to include the `devDependencies`.
+
+So the command will most likely be:
+
+```sh
+node2nix --development -l package-lock.json
+```
+
+See `node2nix` [docs](https://github.com/svanderburg/node2nix) for more info.
+
+#### Pitfalls {#javascript-node2nix-pitfalls}
+
+- If upstream package.json does not have a "version" attribute, `node2nix` will crash. You will need to add it as shown in [the package.json section](#javascript-upstream-package-json).
+- `node2nix` has some [bugs](https://github.com/svanderburg/node2nix/issues/238) related to working with lock files from NPM distributed with `nodejs_16`.
+- `node2nix` does not like missing packages from NPM. If you see something like `Cannot resolve version: vue-loader-v16@undefined` then you might want to try another tool. The package might have been pulled off of NPM.
+
+### yarn2nix {#javascript-yarn2nix}
+
+#### Preparation {#javascript-yarn2nix-preparation}
+
+You will need at least a `yarn.lock` file. If upstream does not have one, you need to generate it and reference it in your package definition.
+
+If the downloaded files contain the `package.json` and `yarn.lock` files, they can be used like this:
+
+```nix
+{
+  offlineCache = fetchYarnDeps {
+    yarnLock = src + "/yarn.lock";
+    hash = "....";
+  };
+}
+```
+
+#### mkYarnPackage {#javascript-yarn2nix-mkYarnPackage}
+
+`mkYarnPackage` will by default try to generate a binary. For packages that only generate static assets (Svelte, Vue, React, Webpack, ...), you will need to explicitly override the build step with your own instructions.
+
+It's important to use the `--offline` flag. For example, if your script is `"build": "something"` in `package.json`, use:
+
+```nix
+{
+  buildPhase = ''
+    export HOME=$(mktemp -d)
+    yarn --offline build
+  '';
+}
+```
+
+The `distPhase` packs the package's dependencies into a tarball using `yarn pack`. You can disable it using:
+
+```nix
+{
+  doDist = false;
+}
+```
+
+The configure phase can sometimes fail because it makes many assumptions which may not always apply. One common override is:
+
+```nix
+{
+  configurePhase = ''
+    ln -s $node_modules node_modules
+  '';
+}
+```
+
+or if you need a writeable node_modules directory:
+
+```nix
+{
+  configurePhase = ''
+    cp -r $node_modules node_modules
+    chmod +w node_modules
+  '';
+}
+```
+
+#### mkYarnModules {#javascript-yarn2nix-mkYarnModules}
+
+This will generate a derivation including the `node_modules` directory.
+If you have to build a derivation for an integrated web framework (Rails, Phoenix, ...), this is probably the easiest way.
+
+#### Overriding dependency behavior {#javascript-mkYarnPackage-overriding-dependencies}
+
+In the `mkYarnPackage` attribute set, the `pkgConfig` property can be used to override packages when you encounter problems building.
+
+For instance, say your package is throwing errors when trying to invoke node-sass:
+
+```
+ENOENT: no such file or directory, scandir '/build/source/node_modules/node-sass/vendor'
+```
+
+To fix this we will specify different versions of build inputs to use, as well as some post install steps to get the software built the way we want:
+
+```nix
+mkYarnPackage rec {
+  pkgConfig = {
+    node-sass = {
+      buildInputs = with final; [ python libsass pkg-config ];
+      postInstall = ''
+        LIBSASS_EXT=auto yarn --offline run build
+        rm build/config.gypi
+      '';
+    };
+  };
+}
+```
+
+#### Pitfalls {#javascript-yarn2nix-pitfalls}
+
+- If version is missing from upstream package.json, yarn will silently install nothing. In that case, you will need to override package.json as shown in the [package.json section](#javascript-upstream-package-json)
+- Having trouble with `node-gyp`? Try adding these lines to the `yarnPreBuild` steps:
+
+  ```nix
+  {
+    yarnPreBuild = ''
+      mkdir -p $HOME/.node-gyp/${nodejs.version}
+      echo 9 > $HOME/.node-gyp/${nodejs.version}/installVersion
+      ln -sfv ${nodejs}/include $HOME/.node-gyp/${nodejs.version}
+      export npm_config_nodedir=${nodejs}
+    '';
+  }
+  ```
+
+  - The `echo 9` step comes from this answer: <https://stackoverflow.com/a/49139496>
+  - Exporting the headers in `npm_config_nodedir` comes from this issue: <https://github.com/nodejs/node-gyp/issues/1191#issuecomment-301243919>
+- `offlineCache` (described [above](#javascript-yarn2nix-preparation)) must be specified to avoid [Import From Derivation](#ssec-import-from-derivation) (IFD) when used inside Nixpkgs.
+
+## Outside Nixpkgs {#javascript-outside-nixpkgs}
+
+There are some other tools available, written in the Nix language.
+They can't be used inside Nixpkgs because they require [Import From Derivation](#ssec-import-from-derivation), which is not allowed in Nixpkgs.
+
+If you are packaging something outside Nixpkgs, consider the following:
+
+### npmlock2nix {#javascript-npmlock2nix}
+
+[npmlock2nix](https://github.com/nix-community/npmlock2nix) aims at building `node_modules` without code generation. It hasn't reached v1 yet, so the API might be subject to change.
+
+#### Pitfalls {#javascript-npmlock2nix-pitfalls}
+
+There are some [problems with npm v7](https://github.com/tweag/npmlock2nix/issues/45).
+
+### nix-npm-buildpackage {#javascript-nix-npm-buildpackage}
+
+[nix-npm-buildpackage](https://github.com/serokell/nix-npm-buildpackage) aims at building `node_modules` without code generation. It hasn't reached v1 yet, so the API might change. It supports both `package-lock.json` and `yarn.lock`.
+
+#### Pitfalls {#javascript-nix-npm-buildpackage-pitfalls}
+
+There are some [problems with npm v7](https://github.com/serokell/nix-npm-buildpackage/issues/33).
diff --git a/nixpkgs/doc/languages-frameworks/julia.section.md b/nixpkgs/doc/languages-frameworks/julia.section.md
new file mode 100644
index 000000000000..235861ac528f
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/julia.section.md
@@ -0,0 +1,69 @@
+# Julia {#language-julia}
+
+## Introduction {#julia-introduction}
+
+Nixpkgs includes Julia as the `julia` derivation.
+You can get specific versions by looking at the other `julia*` top-level derivations available.
+For example, `julia_19` corresponds to Julia 1.9.
+We also provide the current stable version as `julia-stable`, and an LTS version as `julia-lts`.
+
+Occasionally, a Julia version has been too difficult to build from source in Nixpkgs and has been fetched prebuilt instead.
+These Julia versions are differentiated with the `*-bin` suffix; for example, `julia-stable-bin`.
+
+## julia.withPackages {#julia-withpackage}
+
+The basic Julia derivations only provide the built-in packages that come with the distribution.
+
+You can build Julia environments with additional packages using the `julia.withPackages` command.
+This function accepts a list of strings representing Julia package names.
+For example, you can build a Julia environment with the `Plots` package as follows.
+
+```nix
+julia.withPackages ["Plots"]
+```
+
+Arguments can be passed using `.override`.
+For example:
+
+```nix
+(julia.withPackages.override {
+  precompile = false; # Turn off precompilation
+}) ["Plots"]
+```
+
+Here's a nice way to run a Julia environment with a shell one-liner:
+
+```sh
+nix-shell -p 'julia.withPackages ["Plots"]' --run julia
+```
+
+### Arguments {#julia-withpackage-arguments}
+
+* `precompile`: Whether to run `Pkg.precompile()` on the generated environment.
+
+  This will make package imports faster, but may fail in some cases.
+  For example, there is an upstream issue with `Gtk.jl` that prevents precompilation from working in the Nix build sandbox, because the precompiled code tries to access a display.
+  Packages like this will work fine if you build with `precompile=false`, and then precompile as needed once your environment starts.
+
+  Default: `true`
+
+* `extraLibs`: Extra library dependencies that will be placed on the `LD_LIBRARY_PATH` for Julia.
+
+  Should not be needed as we try to obtain library dependencies automatically using Julia's artifacts system.
+
+* `makeWrapperArgs`: Extra arguments to pass to the `makeWrapper` call which we use to wrap the Julia binary.
+* `setDefaultDepot`: Whether to automatically prepend `$HOME/.julia` to the `JULIA_DEPOT_PATH`.
+
+  This is useful because Julia expects a writable depot path as the first entry, which the one we build in Nixpkgs is not.
+  If there's no writable depot, then Julia will show a warning and be unable to save command history logs etc.
+
+  Default: `true`
+
+* `packageOverrides`: Allows you to override packages by name by passing an alternative source.
+
+  For example, you can use a custom version of the `LanguageServer` package by passing `packageOverrides = { "LanguageServer" = fetchFromGitHub {...}; }`.
+
+* `augmentedRegistry`: Allows you to change the registry from which Julia packages are drawn.
+
+  This normally points at a special augmented version of the Julia [General packages registry](https://github.com/JuliaRegistries/General).
+  If you want to use a bleeding-edge version to pick up the latest package updates, you can plug in a later revision than the one in Nixpkgs.
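+
+As a sketch, several of these arguments can be combined via `.override`; the `JULIA_NUM_THREADS` setting below is only a hypothetical illustration of `makeWrapperArgs`:
+
+```nix
+(julia.withPackages.override {
+  # skip Pkg.precompile() for the generated environment
+  precompile = false;
+  # hypothetical example: pass extra flags to the makeWrapper call
+  makeWrapperArgs = [ "--set" "JULIA_NUM_THREADS" "4" ];
+}) ["Plots"]
+```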
diff --git a/nixpkgs/doc/languages-frameworks/lisp.section.md b/nixpkgs/doc/languages-frameworks/lisp.section.md
new file mode 100644
index 000000000000..73f20436c76f
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/lisp.section.md
@@ -0,0 +1,301 @@
+# lisp-modules {#lisp}
+
+This document describes the Nixpkgs infrastructure for building Common Lisp
+systems that use [ASDF](https://asdf.common-lisp.dev/) (Another System
+Definition Facility). It lives in `pkgs/development/lisp-modules`.
+
+## Overview {#lisp-overview}
+
+The main entry point of the API are the Common Lisp implementation packages
+themselves (e.g. `abcl`, `ccl`, `clasp-common-lisp`, `clisp`, `ecl`,
+`sbcl`). They have the `pkgs` and `withPackages` attributes, which can be used
+to discover available packages and to build wrappers, respectively.
+
+The `pkgs` attribute set contains packages that were automatically
+[imported](#lisp-importing-packages-from-quicklisp) from Quicklisp, and any
+other [manually defined](#lisp-defining-packages-inside) ones. Not every package
+works for all the CL implementations (e.g. `nyxt` only makes sense for `sbcl`).
+
+The `withPackages` function is of primary utility. It is used to build
+[runnable wrappers](#lisp-building-wrappers), with a pinned and pre-built
+[ASDF FASL](#lisp-loading-asdf) available in the `ASDF` environment variable,
+and `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS` configured to
+[find the desired systems on runtime](#lisp-loading-systems).
+
+In addition, Lisps have the `withOverrides` function, which can be used to
+[substitute](#lisp-including-external-pkg-in-scope) any package in the scope of
+their `pkgs`. This will also be useful together with `overrideLispAttrs` when
+[dealing with slashy systems](#lisp-dealing-with-slashy-systems), because they
+should stay in the main package and be built by specifying the `systems`
+argument to `build-asdf-system`.
+
+## The 90% use case example {#lisp-use-case-example}
+
+The most common way to use the library is to run ad-hoc wrappers like this:
+
+`nix-shell -p 'sbcl.withPackages (ps: with ps; [ alexandria ])'`
+
+Then, in a shell:
+
+```
+$ sbcl
+* (load (sb-ext:posix-getenv "ASDF"))
+* (asdf:load-system 'alexandria)
+```
+
+One can also create a `pkgs.mkShell` environment in `shell.nix`/`flake.nix`:
+
+```nix
+let
+  sbcl' = sbcl.withPackages (ps: [ ps.alexandria ]);
+in mkShell {
+  packages = [ sbcl' ];
+}
+```
+
+Such a Lisp can now be used, e.g., to compile your sources:
+
+```nix
+{
+  buildPhase = ''
+    ${sbcl'}/bin/sbcl --load my-build-file.lisp
+  '';
+}
+```
+
+## Importing packages from Quicklisp {#lisp-importing-packages-from-quicklisp}
+
+To save some work of writing Nix expressions, there is a script that imports all
+the packages distributed by Quicklisp into `imported.nix`. This works by parsing
+its `releases.txt` and `systems.txt` files, which are published every couple of
+months on [quicklisp.org](https://beta.quicklisp.org/dist/quicklisp.txt).
+
+The import process is implemented in the `import` directory as Common Lisp
+code in the `org.lispbuilds.nix` ASDF system. To run the script, one can
+execute `ql-import.lisp`:
+
+```
+cd pkgs/development/lisp-modules
+nix-shell --run 'sbcl --script ql-import.lisp'
+```
+
+The script will:
+
+1. Download the latest Quicklisp `systems.txt` and `releases.txt` files
+2. Generate a temporary SQLite database of all QL systems in `packages.sqlite`
+3. Generate an `imported.nix` file from the database
+
+(The `packages.sqlite` file can be deleted at will, because it is regenerated
+each time the script runs.)
+
+The maintainer's job is to:
+
+1. Re-run the `ql-import.lisp` script when there is a new Quicklisp release
+2. [Add any missing native dependencies](#lisp-quicklisp-adding-native-dependencies) in `ql.nix`
+3. For packages that still don't build, [package them manually](#lisp-defining-packages-inside) in `packages.nix`
+
+Also, the `imported.nix` file **must not be edited manually**! It should only be
+generated as described in this section (by running `ql-import.lisp`).
+
+### Adding native dependencies {#lisp-quicklisp-adding-native-dependencies}
+
+The Quicklisp files contain ASDF dependency data, but don't include native
+library (CFFI) dependencies, and, in the case of ABCL, Java dependencies.
+
+The `ql.nix` file contains a long list of overrides, where these dependencies
+can be added.
+
+Packages defined in `packages.nix` contain these dependencies naturally.
+
+### Trusting `systems.txt` and `releases.txt` {#lisp-quicklisp-trusting}
+
+The previous implementation of `lisp-modules` didn't fully trust the Quicklisp
+data, because there were times where the dependencies specified were not
+complete and caused broken builds. It instead used a `nix-shell` environment to
+discover real dependencies by using the ASDF APIs.
+
+The current implementation has chosen to trust this data, because it's faster to
+parse a text file than to build each system to generate its Nix file, and
+because that way packages can be mass-imported. Because of that, there may come
+a day when some packages will break due to bugs in Quicklisp. In that case,
+the fix could be a manual override in `packages.nix` and `ql.nix`.
+
+A known fact is that Quicklisp doesn't include dependencies on slashy systems in
+its data. This is an example of a situation where such fixes were used, e.g. to
+replace the `systems` attribute of the affected packages. (See the definition of
+`iolib`).
+
+### Quirks {#lisp-quicklisp-quirks}
+
+During Quicklisp import:
+
+- `+` in names is converted to `_plus{_,}`: `cl+ssl`->`cl_plus_ssl`, `alexandria+`->`alexandria_plus`
+- `.` in names is converted to `_dot_`: `iolib.base`->`iolib_dot_base`
+- names starting with a number have a `_` prepended (`3d-vectors`->`_3d-vectors`)
+- `_` in names is converted to `__` for reversibility
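+
+For example, a wrapper using two systems whose names were mangled by these rules
+might look like this (a sketch, assuming both systems are present in the
+imported package set):
+
+```nix
+sbcl.withPackages (ps: [
+  ps.cl_plus_ssl   # imported from the Quicklisp system "cl+ssl"
+  ps._3d-vectors   # imported from the Quicklisp system "3d-vectors"
+])
+```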
+
+
+## Defining packages manually inside Nixpkgs {#lisp-defining-packages-inside}
+
+Packages that for some reason are not in Quicklisp, and so cannot be
+auto-imported, or don't work straight from the import, are defined in the
+`packages.nix` file.
+
+In that file, use the `build-asdf-system` function, which is a wrapper around
+`mkDerivation` for building ASDF systems. Various other hacks are present, such
+as `build-with-compile-into-pwd` for systems which create files during
+compilation (such as `cl-unicode`).
+
+The `build-asdf-system` function is documented
+[here](#lisp-defining-packages-outside). Also, `packages.nix` is full of
+examples of how to use it.
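+
+As a rough sketch, an entry in `packages.nix` might look like the following; the
+arguments mirror those of `buildASDFSystem` described in the next section, and
+the package name, source and dependencies here are purely illustrative:
+
+```nix
+# Hypothetical entry in packages.nix; name, source and dependencies are illustrative.
+my-system = build-asdf-system {
+  pname = "my-system";
+  version = "1.0.0";
+  src = pkgs.fetchzip {
+    url = "https://example.org/my-system-1.0.0.tar.gz";
+    hash = lib.fakeHash; # replace with the real hash after a first build attempt
+  };
+  systems = [ "my-system" ];
+  lispLibs = [ alexandria ];
+};
+```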
+
+## Defining packages manually outside Nixpkgs {#lisp-defining-packages-outside}
+
+Lisp derivations (`abcl`, `sbcl` etc.) also export the `buildASDFSystem`
+function, which is similar to `build-asdf-system` from `packages.nix`, but is
+part of the public API.
+
+It takes the following arguments:
+
+- `pname`: the package name
+- `version`: the package version
+- `src`: the package source
+- `patches`: patches to apply to the source before build
+- `nativeLibs`: native libraries used by CFFI and grovelling
+- `javaLibs`: Java libraries for ABCL
+- `lispLibs`: dependencies on other packages built with `buildASDFSystem`
+- `systems`: list of systems to build
+
+It can be used to define packages outside Nixpkgs, and, for example, add them
+into the package scope with `withOverrides`.
+
+### Including an external package in scope {#lisp-including-external-pkg-in-scope}
+
+A package defined outside Nixpkgs using `buildASDFSystem` can be woven into the
+Nixpkgs-provided scope like this:
+
+```nix
+let
+  alexandria = sbcl.buildASDFSystem rec {
+    pname = "alexandria";
+    version = "1.4";
+    src = fetchFromGitLab {
+      domain = "gitlab.common-lisp.net";
+      owner = "alexandria";
+      repo = "alexandria";
+      rev = "v${version}";
+      hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
+    };
+  };
+  sbcl' = sbcl.withOverrides (self: super: {
+    inherit alexandria;
+  });
+in sbcl'.pkgs.alexandria
+```
+
+## Overriding package attributes {#lisp-overriding-package-attributes}
+
+Packages export the `overrideLispAttrs` function, which can be used to build a
+new package with different parameters.
+
+Example of overriding `alexandria`:
+
+```nix
+sbcl.pkgs.alexandria.overrideLispAttrs (oldAttrs: rec {
+  version = "1.4";
+  src = fetchFromGitLab {
+    domain = "gitlab.common-lisp.net";
+    owner = "alexandria";
+    repo = "alexandria";
+    rev = "v${version}";
+    hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
+  };
+})
+```
+
+### Dealing with slashy systems {#lisp-dealing-with-slashy-systems}
+
+Slashy (secondary) systems should not exist in their own packages! Instead, they
+should be included in the parent package as an extra entry in the `systems`
+argument to the `build-asdf-system`/`buildASDFSystem` functions.
+
+The reason is that ASDF searches for a secondary system in the `.asd` of the
+parent package. Thus, having them separate would cause either one of them not to
+load cleanly, because one would contain FASLs of itself but not of the other, and
+vice versa.
+
+To package slashy systems, use `overrideLispAttrs`, like so:
+
+```nix
+ecl.pkgs.alexandria.overrideLispAttrs (oldAttrs: {
+  systems = oldAttrs.systems ++ [ "alexandria/tests" ];
+  lispLibs = oldAttrs.lispLibs ++ [ ecl.pkgs.rt ];
+})
+```
+
+See the [respective section](#lisp-including-external-pkg-in-scope) on using
+`withOverrides` for how to weave it back into `ecl.pkgs`.
+
+Note that sometimes the slashy systems might not only have more dependencies
+than the main one, but create a circular dependency between `.asd`
+files. Unfortunately, in this case an ad hoc solution becomes necessary.
+
+## Building Wrappers {#lisp-building-wrappers}
+
+Wrappers can be built using the `withPackages` function of Common Lisp
+implementations (`abcl`, `ecl`, `sbcl` etc.):
+
+```
+nix-shell -p 'sbcl.withPackages (ps: [ ps.alexandria ps.bordeaux-threads ])'
+```
+
+Such a wrapper can then be used like this:
+
+```
+$ sbcl
+* (load (sb-ext:posix-getenv "ASDF"))
+* (asdf:load-system 'alexandria)
+* (asdf:load-system 'bordeaux-threads)
+```
+
+### Loading ASDF {#lisp-loading-asdf}
+
+For best results, avoid calling `(require 'asdf)` when using the
+library-generated wrappers.
+
+Use `(load (ext:getenv "ASDF"))` instead, substituting your implementation's
+function for reading an environment variable in place of `ext:getenv` (e.g.
+`sb-ext:posix-getenv` on SBCL). This will load the (pre-compiled to FASL)
+Nixpkgs-provided version of ASDF.
+
+### Loading systems {#lisp-loading-systems}
+
+Once ASDF is loaded, you can use `asdf:load-system`. This works by setting the right
+values for the `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS` environment
+variables, so that systems are found in the Nix store and pre-compiled FASLs are
+loaded.
+
+## Adding a new Lisp {#lisp-adding-a-new-lisp}
+
+The function `wrapLisp` is used to wrap Common Lisp implementations. It adds the
+`pkgs`, `withPackages`, `withOverrides` and `buildASDFSystem` attributes to the
+derivation.
+
+`wrapLisp` takes these arguments:
+
+- `pkg`: the Lisp package
+- `faslExt`: Implementation-specific extension for FASL files
+- `program`: The name of the executable file in `${pkg}/bin/` (Default: `pkg.pname`)
+- `flags`: A list of flags to always pass to `program` (Default: `[]`)
+- `asdf`: The ASDF version to use (Default: `pkgs.asdf_3_3`)
+- `packageOverrides`: Package overrides config (Default: `(self: super: {})`)
+
+This example wraps CLISP:
+
+```nix
+wrapLisp {
+  pkg = clisp;
+  faslExt = "fas";
+  flags = ["-E" "UTF8"];
+}
+```
diff --git a/nixpkgs/doc/languages-frameworks/lua.section.md b/nixpkgs/doc/languages-frameworks/lua.section.md
new file mode 100644
index 000000000000..db230cf944a5
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/lua.section.md
@@ -0,0 +1,258 @@
+# Lua {#lua}
+
+## Using Lua {#lua-userguide}
+
+### Overview of Lua {#lua-overview}
+
+Several versions of the Lua interpreter are available: LuaJIT and Lua 5.1, 5.2, 5.3 and 5.4.
+The attribute `lua` refers to the default interpreter. It is also possible to refer to specific versions, e.g. `lua5_2` refers to Lua 5.2.
+
+Lua libraries are in separate sets, with one set per interpreter version.
+
+The interpreters have several common attributes. One of these attributes is
+`pkgs`, which is a package set of Lua libraries for this specific
+interpreter. E.g., the `busted` package corresponding to the default interpreter
+is `lua.pkgs.busted`, and the lua 5.2 version is `lua5_2.pkgs.busted`.
+The main package set contains aliases to these package sets, e.g.
+`luaPackages` refers to `lua5_1.pkgs` and `lua52Packages` to
+`lua5_2.pkgs`.
+
+### Installing Lua and packages {#installing-lua-and-packages}
+
+#### Lua environment defined in separate `.nix` file {#lua-environment-defined-in-separate-.nix-file}
+
+Create a file, e.g. `build.nix`, with the following expression
+
+```nix
+with import <nixpkgs> {};
+
+lua5_2.withPackages (ps: with ps; [ busted luafilesystem ])
+```
+
+and install it in your profile with
+
+```shell
+nix-env -if build.nix
+```
+Now you can use the Lua interpreter, as well as the extra packages (`busted`,
+`luafilesystem`) that you added to the environment.
+
+#### Lua environment defined in `~/.config/nixpkgs/config.nix` {#lua-environment-defined-in-.confignixpkgsconfig.nix}
+
+If you prefer to, you could also add the environment as a package override to the Nixpkgs set, e.g.
+using `config.nix`,
+
+```nix
+{ # ...
+
+  packageOverrides = pkgs: with pkgs; {
+    myLuaEnv = lua5_2.withPackages (ps: with ps; [ busted luafilesystem ]);
+  };
+}
+```
+
+and install it in your profile with
+
+```shell
+nix-env -iA nixpkgs.myLuaEnv
+```
+The environment is installed by referring to the attribute, assuming that
+the `nixpkgs` channel was used.
+
+#### Lua environment defined in `/etc/nixos/configuration.nix` {#lua-environment-defined-in-etcnixosconfiguration.nix}
+
+For the sake of completeness, here's another example of how to install the environment system-wide.
+
+```nix
+{ # ...
+
+  environment.systemPackages = with pkgs; [
+    (lua.withPackages(ps: with ps; [ busted luafilesystem ]))
+  ];
+}
+```
+
+### How to override a Lua package using overlays? {#how-to-override-a-lua-package-using-overlays}
+
+Use the following overlay template:
+
+```nix
+final: prev:
+{
+
+  lua = prev.lua.override {
+    packageOverrides = luaself: luaprev: {
+
+      luarocks-nix = luaprev.luarocks-nix.overrideAttrs(oa: {
+        pname = "luarocks-nix";
+        src = /home/my_luarocks/repository;
+      });
+    };
+  };
+
+  luaPackages = final.lua.pkgs;
+}
+```
+
+### Temporary Lua environment with `nix-shell` {#temporary-lua-environment-with-nix-shell}
+
+There are two methods for loading a shell with Lua packages. The first and recommended method
+is to create an environment with `lua.buildEnv` or `lua.withPackages` and load that. E.g.
+
+```sh
+$ nix-shell -p 'lua.withPackages(ps: with ps; [ busted luafilesystem ])'
+```
+
+opens a shell from which you can launch the interpreter
+
+```sh
+[nix-shell:~] lua
+```
+
+The other method, which is not recommended, does not create an environment and requires you to list the packages directly,
+
+```sh
+$ nix-shell -p lua.pkgs.busted lua.pkgs.luafilesystem
+```
+Again, it is possible to launch the interpreter from the shell.
+The Lua interpreter has the attribute `pkgs` which contains all Lua libraries for that specific interpreter.
+
+
+## Developing with lua {#lua-developing}
+
+Now that you know how to get a working Lua environment with Nix, it is time
+to move forward and start actually developing with Lua. There are two ways to
+package Lua software: either it is on luarocks, in which case most of the work
+can be taken care of by the luarocks2nix converter, or the packaging has to be
+done manually. Let's present the luarocks way first and the manual one afterwards.
+
+### Packaging a library on luarocks {#packaging-a-library-on-luarocks}
+
+[Luarocks.org](https://luarocks.org/) is the main repository of lua packages.
+The site offers two types of packages, the `rockspec` and the `src.rock`
+(the equivalent of a [rockspec](https://github.com/luarocks/luarocks/wiki/Rockspec-format) but bundled with the source).
+
+Luarocks-based packages are generated in [pkgs/development/lua-modules/generated-packages.nix](https://github.com/NixOS/nixpkgs/tree/master/pkgs/development/lua-modules/generated-packages.nix) from
+the whitelist `maintainers/scripts/luarocks-packages.csv` and updated by running
+the package `luarocks-packages-updater`:
+
+```sh
+nix-shell -p luarocks-packages-updater --run luarocks-packages-updater
+```
+
+[luarocks2nix](https://github.com/nix-community/luarocks) is a tool capable of generating nix derivations from both rockspec and src.rock (and favors the src.rock).
+The automation only goes so far though and some packages need to be customized.
+These customizations go in [pkgs/development/lua-modules/overrides.nix](https://github.com/NixOS/nixpkgs/tree/master/pkgs/development/lua-modules/overrides.nix).
+For instance, if the rockspec defines `external_dependencies`, these need to be manually added to `overrides.nix`.
+
+You can try converting luarocks packages to nix packages with the command `nix-shell -p luarocks-nix` and then `luarocks nix PKG_NAME`.
+
+#### Packaging a library manually {#packaging-a-library-manually}
+
+You can develop your package as you usually would, just don't forget to wrap it
+within a `toLuaModule` call, for instance
+
+```nix
+{
+  mynewlib = toLuaModule ( stdenv.mkDerivation { /* ... */ });
+}
+```
+
+There is also the `buildLuaPackage` function that can be used when lua modules
+are not packaged for luarocks. You can see a few examples at `pkgs/top-level/lua-packages.nix`.
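+
+As a rough, hypothetical sketch (the attribute names besides the standard
+`mkDerivation` ones are illustrative; see `pkgs/top-level/lua-packages.nix` for
+real usage), such a package could look like this:
+
+```nix
+{ lib, buildLuaPackage, fetchFromGitHub }:
+
+buildLuaPackage rec {
+  pname = "mylib";
+  version = "1.0";
+
+  src = fetchFromGitHub {
+    owner = "example";
+    repo = "mylib";
+    rev = "v${version}";
+    hash = lib.fakeHash; # replace with the real hash
+  };
+
+  # Build and install steps depend on the project; a pure-Lua module may only
+  # need its sources copied into place in a custom installPhase.
+}
+```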
+
+## Lua Reference {#lua-reference}
+
+### Lua interpreters {#lua-interpreters}
+
+Versions 5.1, 5.2, 5.3 and 5.4 of the Lua interpreter are available as
+`lua5_1`, `lua5_2`, `lua5_3` and `lua5_4`, respectively. LuaJIT is available too.
+The Nix expressions for the interpreters can be found in `pkgs/development/interpreters/lua-5`.
+
+#### Attributes on lua interpreters packages {#attributes-on-lua-interpreters-packages}
+
+Each interpreter has the following attributes:
+
+- `interpreter`. Alias for `${pkgs.lua}/bin/lua`.
+- `buildEnv`. Function to build lua interpreter environments with extra packages bundled together. See section *lua.buildEnv function* for usage and documentation.
+- `withPackages`. Simpler interface to `buildEnv`.
+- `pkgs`. Set of Lua packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
+
+#### `buildLuarocksPackage` function {#buildluarockspackage-function}
+
+The `buildLuarocksPackage` function is implemented in `pkgs/development/interpreters/lua-5/build-luarocks-package.nix`.
+The following is an example:
+```nix
+{
+  luaposix = buildLuarocksPackage {
+    pname = "luaposix";
+    version = "34.0.4-1";
+
+    src = fetchurl {
+      url    = "https://raw.githubusercontent.com/rocks-moonscript-org/moonrocks-mirror/master/luaposix-34.0.4-1.src.rock";
+      hash = "sha256-4mLJG8n4m6y4Fqd0meUDfsOb9RHSR0qa/KD5KCwrNXs=";
+    };
+    disabled = (luaOlder "5.1") || (luaAtLeast "5.4");
+    propagatedBuildInputs = [ bit32 lua std_normalize ];
+
+    meta = {
+      homepage = "https://github.com/luaposix/luaposix/";
+      description = "Lua bindings for POSIX";
+      maintainers = with lib.maintainers; [ vyp lblasc ];
+      license.fullName = "MIT/X11";
+    };
+  };
+}
+```
+
+The `buildLuarocksPackage` function delegates most tasks to luarocks:
+
+* it adds `luarocks` as an unpacker for `src.rock` files (which are really zip files).
+* `configurePhase` writes a temporary luarocks configuration file, whose location
+  is exported via the environment variable `LUAROCKS_CONFIG`.
+* the `buildPhase` does nothing.
+* `installPhase` calls `luarocks make --deps-mode=none --tree $out` to build and
+  install the package.
+* in the `postFixup` phase, the `wrapLuaPrograms` bash function is called to
+  wrap all programs in the `$out/bin/*` directory to include the `$PATH`
+  environment variable and add dependent libraries to the scripts' `LUA_PATH` and
+  `LUA_CPATH`.
+
+It accepts as arguments:
+
+* `luarocksConfig`: a Nix value that directly maps to the luarocks config used during
+  the installation
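+
+A hypothetical sketch of passing it is shown below; the attributes inside
+`luarocksConfig` follow the upstream luarocks configuration format, and the
+values here are purely illustrative:
+
+```nix
+buildLuarocksPackage {
+  pname = "example";
+  version = "1.0-1";
+  # …
+  luarocksConfig = {
+    # rendered into the temporary luarocks configuration file used during installation
+    variables = {
+      CFLAGS = "-O2";
+    };
+  };
+}
+```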
+
+By default, `meta.platforms` is set to the same value as the interpreter's, unless overridden.
+
+#### `buildLuaApplication` function {#buildluaapplication-function}
+
+The `buildLuaApplication` function is practically the same as `buildLuaPackage`.
+The difference is that `buildLuaPackage` by default prefixes the names of the packages with the version of the interpreter.
+Because with an application we're not interested in multiple versions, the prefix is dropped.
+
+#### lua.withPackages function {#lua.withpackages-function}
+
+`lua.withPackages` takes a function as an argument; that function is passed the set of Lua packages and returns the list of packages to be included in the environment.
+Using the `withPackages` function, the previous example for the luafilesystem environment can be written like this:
+
+```nix
+lua.withPackages (ps: [ps.luafilesystem])
+```
+
+`withPackages` passes the correct package set for the specific interpreter version as an argument to the function. In the above example, `ps` equals `luaPackages`.
+But you can also easily switch to using `lua5_1`:
+
+```nix
+lua5_1.withPackages (ps: [ps.lua])
+```
+
+Now, `ps` is set to `lua5_1.pkgs`, matching the version of the interpreter.
+
+### Lua Contributing guidelines {#lua-contributing}
+
+The following rules should be respected:
+
+* Commit names of Lua libraries should reflect that they are Lua libraries, so write for example `luaPackages.luafilesystem: 1.11 -> 1.12`.
diff --git a/nixpkgs/doc/languages-frameworks/maven.section.md b/nixpkgs/doc/languages-frameworks/maven.section.md
new file mode 100644
index 000000000000..e56beb102570
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/maven.section.md
@@ -0,0 +1,439 @@
+# Maven {#maven}
+
+Maven is a well-known build tool for the Java ecosystem; however, it has some challenges when integrating into the Nix build system.
+
+The following provides a list of common patterns for packaging a Maven project (or any JVM language that can export to Maven) as a Nix package.
+
+## Building a package using `maven.buildMavenPackage` {#maven-buildmavenpackage}
+
+Consider the following package:
+
+```nix
+{ lib, fetchFromGitHub, jre, makeWrapper, maven }:
+
+maven.buildMavenPackage rec {
+  pname = "jd-cli";
+  version = "1.2.1";
+
+  src = fetchFromGitHub {
+    owner = "intoolswetrust";
+    repo = pname;
+    rev = "${pname}-${version}";
+    hash = "sha256-rRttA5H0A0c44loBzbKH7Waoted3IsOgxGCD2VM0U/Q=";
+  };
+
+  mvnHash = "sha256-kLpjMj05uC94/5vGMwMlFzLKNFOKeyNvq/vmB6pHTAo=";
+
+  nativeBuildInputs = [ makeWrapper ];
+
+  installPhase = ''
+    mkdir -p $out/bin $out/share/jd-cli
+    install -Dm644 jd-cli/target/jd-cli.jar $out/share/jd-cli
+
+    makeWrapper ${jre}/bin/java $out/bin/jd-cli \
+      --add-flags "-jar $out/share/jd-cli/jd-cli.jar"
+  '';
+
+  meta = {
+    description = "Simple command line wrapper around JD Core Java Decompiler project";
+    homepage = "https://github.com/intoolswetrust/jd-cli";
+    license = lib.licenses.gpl3Plus;
+    maintainers = with lib.maintainers; [ majiir ];
+  };
+}
+```
+
+This package calls `maven.buildMavenPackage` to do its work. The primary difference from `stdenv.mkDerivation` is the `mvnHash` variable, which is a hash of all of the Maven dependencies.
+
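+If you do not know the dependency hash yet, one common approach (not specific to
+Maven) is to start from a placeholder and let the failing build report the
+expected value:
+
+```nix
+mvnHash = lib.fakeHash; # build once, then replace with the hash reported by the failed build
+```
+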
+::: {.tip}
+After `maven.buildMavenPackage` has built the project, we do a standard Java `.jar` installation by saving the `.jar` to `$out/share/java` and then making a wrapper which allows executing that file; see [](#sec-language-java) for additional generic information about packaging Java applications.
+:::
+
+### Stable Maven plugins {#stable-maven-plugins}
+
+Maven defines default versions for its core plugins, e.g. `maven-compiler-plugin`. If your project does not override these versions, an upgrade of Maven will change the version of the used plugins, and therefore the derivation and hash.
+
+When `maven` is upgraded, `mvnHash` for the derivation must be updated as well: otherwise, the project will be built against the repository derivation containing the old plugins, and fail because the requested plugins are missing.
+
+This clearly prevents automatic upgrades of Maven: a manual effort must be made throughout nixpkgs by any maintainer wishing to push the upgrades.
+
+To make sure that your package does not add extra manual effort when upgrading Maven, explicitly define versions for all plugins. You can check if this is the case by adding the following plugin to your (parent) POM:
+
+```xml
+<plugin>
+  <groupId>org.apache.maven.plugins</groupId>
+  <artifactId>maven-enforcer-plugin</artifactId>
+  <version>3.3.0</version>
+  <executions>
+    <execution>
+      <id>enforce-plugin-versions</id>
+      <goals>
+        <goal>enforce</goal>
+      </goals>
+      <configuration>
+        <rules>
+          <requirePluginVersions />
+        </rules>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+```
+
+## Manually using `mvn2nix` {#maven-mvn2nix}
+::: {.warning}
+This way is no longer recommended; see [](#maven-buildmavenpackage) for the simpler and preferred way.
+:::
+
+For the purposes of this example, let's consider a very basic Maven project with the following `pom.xml`, which has a single dependency on [emoji-java](https://github.com/vdurmont/emoji-java).
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>io.github.fzakaria</groupId>
+  <artifactId>maven-demo</artifactId>
+  <version>1.0</version>
+  <packaging>jar</packaging>
+  <name>NixOS Maven Demo</name>
+
+  <dependencies>
+    <dependency>
+        <groupId>com.vdurmont</groupId>
+        <artifactId>emoji-java</artifactId>
+        <version>5.1.1</version>
+      </dependency>
+  </dependencies>
+</project>
+```
+
+Our main class file will be very simple:
+
+```java
+import com.vdurmont.emoji.EmojiParser;
+
+public class Main {
+  public static void main(String[] args) {
+    String str = "NixOS :grinning: is super cool :smiley:!";
+    String result = EmojiParser.parseToUnicode(str);
+    System.out.println(result);
+  }
+}
+```
+
+You can find this demo project at [https://github.com/fzakaria/nixos-maven-example](https://github.com/fzakaria/nixos-maven-example).
+
+### Solving for dependencies {#solving-for-dependencies}
+
+#### buildMaven with NixOS/mvn2nix-maven-plugin {#buildmaven-with-nixosmvn2nix-maven-plugin}
+`buildMaven` is an alternative method that tries to follow patterns similar to other programming languages by generating a lock file. It relies on the Maven plugin [mvn2nix-maven-plugin](https://github.com/NixOS/mvn2nix-maven-plugin).
+
+First you generate a `project-info.json` file using the maven plugin.
+
+> This should be executed in the project's source repository, or be pointed at the `pom.xml` to use.
+
+```bash
+# run this step within the project's source repository
+❯ mvn org.nixos.mvn2nix:mvn2nix-maven-plugin:mvn2nix
+
+❯ cat project-info.json | jq | head
+{
+  "project": {
+    "artifactId": "maven-demo",
+    "groupId": "org.nixos",
+    "version": "1.0",
+    "classifier": "",
+    "extension": "jar",
+    "dependencies": [
+      {
+        "artifactId": "maven-resources-plugin",
+```
+
+This file is then given to the `buildMaven` function, and it returns two attributes.
+
+**`repo`**:
+    A Maven repository that is a symlink farm of all the dependencies found in the `project-info.json`.
+
+**`build`**:
+    A simple derivation that runs through `mvn compile` & `mvn package` to build the JAR. You may use this as inspiration for more complicated derivations.
+
+Here is an [example](https://github.com/fzakaria/nixos-maven-example/blob/main/build-maven-repository.nix) of building the Maven repository
+
+```nix
+{ pkgs ? import <nixpkgs> { } }:
+with pkgs;
+(buildMaven ./project-info.json).repo
+```
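+
+The `build` attribute can be used in the same way when the default `mvn compile` and `mvn package` invocation is sufficient; the sketch below simply selects the other attribute:
+
+```nix
+{ pkgs ? import <nixpkgs> { } }:
+with pkgs;
+# builds the JAR against the generated repository
+(buildMaven ./project-info.json).build
+```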
+
+The benefit over the _double invocation_, as we will see below, is that the _/nix/store_ entry is a _linkFarm_ of every package, so changes to your dependency set don't involve downloading everything from scratch.
+
+```bash
+❯ tree $(nix-build --no-out-link build-maven-repository.nix) | head
+/nix/store/g87va52nkc8jzbmi1aqdcf2f109r4dvn-maven-repository
+├── antlr
+│   └── antlr
+│       └── 2.7.2
+│           ├── antlr-2.7.2.jar -> /nix/store/d027c8f2cnmj5yrynpbq2s6wmc9cb559-antlr-2.7.2.jar
+│           └── antlr-2.7.2.pom -> /nix/store/mv42fc5gizl8h5g5vpywz1nfiynmzgp2-antlr-2.7.2.pom
+├── avalon-framework
+│   └── avalon-framework
+│       └── 4.1.3
+│           ├── avalon-framework-4.1.3.jar -> /nix/store/iv5fp3955w3nq28ff9xfz86wvxbiw6n9-avalon-framework-4.1.3.jar
+```
+
+#### Double Invocation {#double-invocation}
+::: {.note}
+This pattern is the simplest but may cause unnecessary rebuilds due to the output hash changing.
+:::
+
+The double invocation is a _simple_ way to get around the problem that `nix-build` may be sandboxed and have no Internet connectivity.
+
+It treats the entire Maven repository as a single source to be downloaded, relying on Maven's dependency resolution to satisfy the output hash. This is similar to fetchers like `fetchgit`, except it has to run a Maven build to determine what to download.
+
+The first step will be to build the Maven project as a fixed-output derivation in order to collect the Maven repository -- below is an [example](https://github.com/fzakaria/nixos-maven-example/blob/main/double-invocation-repository.nix).
+
+::: {.note}
+Traditionally the Maven repository is at `~/.m2/repository`. We will override this to be the `$out` directory.
+:::
+
+```nix
+{ lib, stdenv, maven }:
+stdenv.mkDerivation {
+  name = "maven-repository";
+  buildInputs = [ maven ];
+  src = ./.; # or fetchFromGitHub, cleanSourceWith, etc
+  buildPhase = ''
+    mvn package -Dmaven.repo.local=$out
+  '';
+
+  # keep only *.{pom,jar,sha1,nbm} and delete all ephemeral files with lastModified timestamps inside
+  installPhase = ''
+    find $out -type f \
+      -name \*.lastUpdated -or \
+      -name resolver-status.properties -or \
+      -name _remote.repositories \
+      -delete
+  '';
+
+  # don't do any fixup
+  dontFixup = true;
+  outputHashAlgo = "sha256";
+  outputHashMode = "recursive";
+  # replace this with the correct SHA256
+  outputHash = lib.fakeSha256;
+}
+```
+
+The build will fail, and tell you the expected `outputHash` to place. When you've set the hash, the build will return with a `/nix/store` entry whose contents are the full Maven repository.
+
+::: {.warning}
+Some additional files are deleted that could otherwise cause the output hash to change on subsequent runs.
+:::
+
+```bash
+❯ tree $(nix-build --no-out-link double-invocation-repository.nix) | head
+/nix/store/8kicxzp98j68xyi9gl6jda67hp3c54fq-maven-repository
+├── backport-util-concurrent
+│   └── backport-util-concurrent
+│       └── 3.1
+│           ├── backport-util-concurrent-3.1.pom
+│           └── backport-util-concurrent-3.1.pom.sha1
+├── classworlds
+│   └── classworlds
+│       ├── 1.1
+│       │   ├── classworlds-1.1.jar
+```
+
+If your package uses _SNAPSHOT_ dependencies or _version ranges_, there is a strong likelihood that over time your output hash will change, since the resolved dependencies may change. Hence this method is less recommended than using `buildMaven`.
+
+### Building a JAR {#building-a-jar}
+
+Regardless of which strategy is chosen above, the step to build the derivation is the same.
+
+```nix
+{ stdenv, maven, callPackage }:
+# pick a repository derivation, here we will use buildMaven
+let repository = callPackage ./build-maven-repository.nix { };
+in stdenv.mkDerivation rec {
+  pname = "maven-demo";
+  version = "1.0";
+
+  src = builtins.fetchTarball "https://github.com/fzakaria/nixos-maven-example/archive/main.tar.gz";
+  buildInputs = [ maven ];
+
+  buildPhase = ''
+    echo "Using repository ${repository}"
+    mvn --offline -Dmaven.repo.local=${repository} package;
+  '';
+
+  installPhase = ''
+    install -Dm644 target/${pname}-${version}.jar $out/share/java
+  '';
+}
+```
+
+::: {.tip}
+We place the library in `$out/share/java` since the JDK package has a _stdenv setup hook_ that adds any JARs in the `share/java` directories of the build inputs to the `CLASSPATH` environment variable.
+:::
+
+```bash
+❯ tree $(nix-build --no-out-link build-jar.nix)
+/nix/store/7jw3xdfagkc2vw8wrsdv68qpsnrxgvky-maven-demo-1.0
+└── share
+    └── java
+        └── maven-demo-1.0.jar
+
+2 directories, 1 file
+```
+
+### Runnable JAR {#runnable-jar}
+
+The previous example builds a `jar` file but that's not a file one can run.
+
+You need to use it with `java -jar $out/share/java/output.jar` and make sure to provide the required dependencies on the classpath.
+
+The following explains how to use `makeWrapper` in order to make the derivation produce an executable that will run the JAR file you created.
+
+We will use the same repository we built above (either _double invocation_ or _buildMaven_) to set up a `CLASSPATH` for our JAR.
+
+The following two methods are more suited to Nix than building an [UberJar](https://imagej.net/Uber-JAR), which may be the more traditional approach.
+
+#### CLASSPATH {#classpath}
+
+This method is ideal if you are providing a derivation for _nixpkgs_ and don't want to patch the project's `pom.xml`.
+
+We will read the Maven repository and flatten it to a single list. This list will then be concatenated with the _CLASSPATH_ separator to create the full classpath.
+
+We make sure to provide this classpath to the `makeWrapper`.
+
+```nix
+{ stdenv, maven, callPackage, makeWrapper, jre }:
+let
+  repository = callPackage ./build-maven-repository.nix { };
+in stdenv.mkDerivation rec {
+  pname = "maven-demo";
+  version = "1.0";
+
+  src = builtins.fetchTarball
+    "https://github.com/fzakaria/nixos-maven-example/archive/main.tar.gz";
+  nativeBuildInputs = [ makeWrapper ];
+  buildInputs = [ maven ];
+
+  buildPhase = ''
+    echo "Using repository ${repository}"
+    mvn --offline -Dmaven.repo.local=${repository} package;
+  '';
+
+  installPhase = ''
+    mkdir -p $out/bin
+
+    classpath=$(find ${repository} -name "*.jar" -printf ':%h/%f');
+    install -Dm644 target/${pname}-${version}.jar $out/share/java
+    # create a wrapper that will automatically set the classpath
+    # this should be the paths from the dependency derivation
+    makeWrapper ${jre}/bin/java $out/bin/${pname} \
+          --add-flags "-classpath $out/share/java/${pname}-${version}.jar:''${classpath#:}" \
+          --add-flags "Main"
+  '';
+}
+```
+
+#### MANIFEST file via Maven Plugin {#manifest-file-via-maven-plugin}
+
+This method is ideal if you are the project owner and want to change your `pom.xml` to set the CLASSPATH within it.
+
+Augment the `pom.xml` to create a JAR with the following manifest:
+
+```xml
+<build>
+  <plugins>
+    <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+            <archive>
+                <manifest>
+                    <addClasspath>true</addClasspath>
+                    <classpathPrefix>../../repository/</classpathPrefix>
+                    <classpathLayoutType>repository</classpathLayoutType>
+                    <mainClass>Main</mainClass>
+                </manifest>
+                <manifestEntries>
+                    <Class-Path>.</Class-Path>
+                </manifestEntries>
+            </archive>
+        </configuration>
+    </plugin>
+  </plugins>
+</build>
+```
+
+The above plugin instructs the JAR to look for the necessary dependencies in the relative `../../repository/` folder. The layout of that folder is also in the _maven repository_ style.
+
+```bash
+❯ unzip -q -c $(nix-build --no-out-link runnable-jar.nix)/share/java/maven-demo-1.0.jar META-INF/MANIFEST.MF
+
+Manifest-Version: 1.0
+Archiver-Version: Plexus Archiver
+Built-By: nixbld
+Class-Path: . ../../repository/com/vdurmont/emoji-java/5.1.1/emoji-jav
+ a-5.1.1.jar ../../repository/org/json/json/20170516/json-20170516.jar
+Created-By: Apache Maven 3.6.3
+Build-Jdk: 1.8.0_265
+Main-Class: Main
+```
+
+We will modify the derivation above to add, during the `installPhase`, a symlink to our repository so that it's accessible to our JAR.
+
+```nix
+{ stdenv, maven, callPackage, makeWrapper, jre }:
+# pick a repository derivation, here we will use buildMaven
+let repository = callPackage ./build-maven-repository.nix { };
+in stdenv.mkDerivation rec {
+  pname = "maven-demo";
+  version = "1.0";
+
+  src = builtins.fetchTarball
+    "https://github.com/fzakaria/nixos-maven-example/archive/main.tar.gz";
+  nativeBuildInputs = [ makeWrapper ];
+  buildInputs = [ maven ];
+
+  buildPhase = ''
+    echo "Using repository ${repository}"
+    mvn --offline -Dmaven.repo.local=${repository} package;
+  '';
+
+  installPhase = ''
+    mkdir -p $out/bin
+
+    # create a symbolic link for the repository directory
+    ln -s ${repository} $out/repository
+
+    install -Dm644 target/${pname}-${version}.jar $out/share/java
+    # create a wrapper that will automatically set the classpath
+    # this should be the paths from the dependency derivation
+    makeWrapper ${jre}/bin/java $out/bin/${pname} \
+          --add-flags "-jar $out/share/java/${pname}-${version}.jar"
+  '';
+}
+```
+::: {.note}
+Our script produces a dependency on `jre` rather than `jdk` to restrict the runtime closure necessary to run the application.
+:::
+
+This will give you an executable shell script that launches your JAR with all the dependencies available.
+
+```bash
+❯ tree $(nix-build --no-out-link runnable-jar.nix)
+/nix/store/8d4c3ibw8ynsn01ibhyqmc1zhzz75s26-maven-demo-1.0
+├── bin
+│   └── maven-demo
+├── repository -> /nix/store/g87va52nkc8jzbmi1aqdcf2f109r4dvn-maven-repository
+└── share
+    └── java
+        └── maven-demo-1.0.jar
+
+❯ $(nix-build --no-out-link --option tarball-ttl 1 runnable-jar.nix)/bin/maven-demo
+NixOS 😀 is super cool 😃!
+```
diff --git a/nixpkgs/doc/languages-frameworks/nim.section.md b/nixpkgs/doc/languages-frameworks/nim.section.md
new file mode 100644
index 000000000000..c6ebf49b83f6
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/nim.section.md
@@ -0,0 +1,125 @@
+# Nim {#nim}
+
+The Nim compiler and a builder function are available.
+Nim programs are built using `buildNimPackage` and a lockfile containing Nim dependencies.
+
+The following example shows a Nim program that depends only on Nim libraries:
+```nix
+{ lib, buildNimPackage, fetchFromGitHub }:
+
+buildNimPackage { } (finalAttrs: {
+  pname = "ttop";
+  version = "1.2.7";
+
+  src = fetchFromGitHub {
+    owner = "inv2004";
+    repo = "ttop";
+    rev = "v${finalAttrs.version}";
+    hash = "sha256-oPdaUqh6eN1X5kAYVvevOndkB/xnQng9QVLX9bu5P5E=";
+  };
+
+  lockFile = ./lock.json;
+
+  nimFlags = [
+    "-d:NimblePkgVersion=${finalAttrs.version}"
+  ];
+})
+```
+
+## `buildNimPackage` parameters {#buildnimpackage-parameters}
+
+The `buildNimPackage` function takes an attrset of parameters that are passed on to `stdenv.mkDerivation`.
+
+The following parameters are specific to `buildNimPackage`:
+
+* `lockFile`: JSON formatted lockfile.
+* `nimbleFile`: Specify the Nimble file location of the package being built
+  rather than discover the file at build-time.
+* `nimRelease ? true`: Build the package in *release* mode.
+* `nimDefines ? []`: A list of Nim defines. Key-value tuples are not supported.
+* `nimFlags ? []`: A list of command line arguments to pass to the Nim compiler.
+  Use this to specify defines with arguments in the form of `-d:${name}=${value}`.
+* `nimDoc ? false`: Build and install HTML documentation.
+
+## Lockfiles {#nim-lockfiles}
+Nim lockfiles are created with the `nim_lk` utility.
+Run `nim_lk` with the source directory as an argument and it will print a lockfile to stdout.
+```sh
+$ cd nixpkgs
+$ nix build -f . ttop.src
+$ nix run -f . nim_lk ./result | jq --sort-keys > pkgs/by-name/tt/ttop/lock.json
+```
+
+## Overriding Nim packages {#nim-overrides}
+
+The `buildNimPackage` function generates flags and additional build dependencies from the `lockFile` parameter passed to `buildNimPackage`. Using [`overrideAttrs`](#sec-pkg-overrideAttrs) on the final package will apply after this has already been generated, so this can't be used to override the `lockFile` in a package built with `buildNimPackage`. To be able to override parameters before flags and build dependencies are generated from the `lockFile`, use `overrideNimAttrs` instead with the same syntax as `overrideAttrs`:
+
+```nix
+pkgs.nitter.overrideNimAttrs {
+  # using a different source which has different dependencies from the standard package
+  src = pkgs.fetchFromGitHub { /* … */ };
+  # new lock file generated from the source
+  lockFile = ./custom-lock.json;
+}
+```
+
+## Lockfile dependency overrides {#nim-lock-overrides}
+
+The `buildNimPackage` function matches the libraries specified by `lockFile` to an attrset of override functions that are then applied to the package derivation.
+The default overrides are maintained as the top-level `nimOverrides` attrset at `pkgs/top-level/nim-overrides.nix`.
+
+For example, to propagate a dependency on SDL2 for lockfiles that select the Nim `sdl2` library, an overlay is added to the set in the `nim-overrides.nix` file:
+```nix
+{ lib
+/* … */
+, SDL2
+/* … */
+}:
+
+{
+  /* … */
+  sdl2 =
+    lockAttrs:
+    finalAttrs:
+    { buildInputs ? [ ], ... }:
+    {
+      buildInputs = buildInputs ++ [ SDL2 ];
+    };
+  /* … */
+}
+```
+
+The annotations in the `nim-overrides.nix` set are functions that take three arguments and return a new attrset to be overlaid on the package being built.
+
+- `lockAttrs`: the attrset for this library from within a lockfile. This can be used to implement library version constraints, such as marking libraries as broken or insecure.
+- `finalAttrs`: the final attrset passed by `buildNimPackage` to `stdenv.mkDerivation`.
+- `prevAttrs`: the attrset produced by the initial arguments to `buildNimPackage` and any preceding lockfile overlays.
+
+### Overriding a Nim library override {#nim-lock-overrides-overrides}
+
+The `nimOverrides` attrset makes it possible to modify overrides in a few different ways.
+
+Override a package internal to its definition:
+```nix
+{ lib, buildNimPackage, nimOverrides, libressl }:
+
+let
+  buildNimPackage' = buildNimPackage.override {
+    nimOverrides = nimOverrides.override { openssl = libressl; };
+  };
+in buildNimPackage' (finalAttrs: {
+  pname = "foo";
+  # …
+})
+```
+
+Override a package externally:
+```nix
+{ pkgs }: {
+  foo = pkgs.foo.override {
+    buildNimPackage = pkgs.buildNimPackage.override {
+      nimOverrides = pkgs.nimOverrides.override { openssl = pkgs.libressl; };
+    };
+  };
+}
+```
diff --git a/nixpkgs/doc/languages-frameworks/ocaml.section.md b/nixpkgs/doc/languages-frameworks/ocaml.section.md
new file mode 100644
index 000000000000..44f514e90a1b
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/ocaml.section.md
@@ -0,0 +1,134 @@
+# OCaml {#sec-language-ocaml}
+
+## User guide {#sec-language-ocaml-user-guide}
+
+OCaml libraries are available in attribute sets of the form `ocaml-ng.ocamlPackages_X_XX` where X is to be replaced with the desired compiler version. For example, ocamlgraph compiled with OCaml 4.12 can be found in `ocaml-ng.ocamlPackages_4_12.ocamlgraph`. The compiler itself is also located in this set, under the name `ocaml`.
+
+If you don't care about the exact compiler version, `ocamlPackages` is a top-level alias pointing to a recent version of OCaml.
+
+OCaml applications are usually available top-level, and not inside `ocamlPackages`. Notable exceptions are build tools such as `dune` or `ocaml-lsp` that must be built with the same compiler version as the one you intend to use.
+
+To open a shell able to build a typical OCaml project, put the dependencies in `buildInputs` and add `ocamlPackages.ocaml` and `ocamlPackages.findlib` to `nativeBuildInputs` at least.
+For example:
+```nix
+let
+ pkgs = import <nixpkgs> {};
+ # choose the ocaml version you want to use
+ ocamlPackages = pkgs.ocaml-ng.ocamlPackages_4_12;
+in
+pkgs.mkShell {
+  # build tools
+  nativeBuildInputs = with ocamlPackages; [ ocaml findlib dune_2 ocaml-lsp ];
+  # dependencies
+  buildInputs = with ocamlPackages; [ ocamlgraph ];
+}
+```
+
+## Packaging guide {#sec-language-ocaml-packaging}
+
+OCaml libraries should be installed in `$(out)/lib/ocaml/${ocaml.version}/site-lib/`. Such directories are automatically added to the `$OCAMLPATH` environment variable when building another package that depends on them or when opening a `nix-shell`.
+
+Given that most of the OCaml ecosystem is now built with dune, nixpkgs includes a convenience build support function called `buildDunePackage` that will build an OCaml package using dune, OCaml and findlib and any additional dependencies provided as `buildInputs` or `propagatedBuildInputs`.
+
+Here is a simple package example.
+
+- It defines an (optional) attribute `minimalOCamlVersion` (see note below)
+  that will be used to throw a descriptive evaluation error if building with
+  an older OCaml is attempted.
+
+- It uses the `fetchFromGitHub` fetcher to get its source.
+
+- It also accepts a `duneVersion` parameter (valid values are `"1"`, `"2"`, and
+  `"3"`). The recommended practice is to set it only if you don't want the default
+  value and/or it depends on something else like the package version. You might see
+  a no-longer-supported argument `useDune2`. The behavior was `useDune2 = true;` =>
+  `duneVersion = "2";` and `useDune2 = false;` => `duneVersion = "1";`. It was
+  used at the time when dune 3 didn't exist.
+
+- It sets the optional `doCheck` attribute such that tests will be run with
+  `dune runtest -p angstrom` after the build (`dune build -p angstrom`) is
+  complete, but only if the OCaml version is at least `"4.05"`.
+
+- It uses the package `ocaml-syntax-shims` as a build input, `alcotest` and
+  `ppx_let` as check inputs (because they are needed to run the tests), and
+  `bigstringaf` and `result` as propagated build inputs (thus they will also be
+  available to libraries depending on this library).
+
+- The library will be installed using the `angstrom.install` file that dune
+  generates.
+
+```nix
+{ lib,
+  fetchFromGitHub,
+  buildDunePackage,
+  ocaml,
+  ocaml-syntax-shims,
+  alcotest,
+  result,
+  bigstringaf,
+  ppx_let }:
+
+buildDunePackage rec {
+  pname = "angstrom";
+  version = "0.15.0";
+
+  minimalOCamlVersion = "4.04";
+
+  src = fetchFromGitHub {
+    owner  = "inhabitedtype";
+    repo   = pname;
+    rev    = version;
+    hash   = "sha256-MK8o+iPGANEhrrTc1Kz9LBilx2bDPQt7Pp5P2libucI=";
+  };
+
+  checkInputs = [ alcotest ppx_let ];
+  buildInputs = [ ocaml-syntax-shims ];
+  propagatedBuildInputs = [ bigstringaf result ];
+  doCheck = lib.versionAtLeast ocaml.version "4.05";
+
+  meta = {
+    homepage = "https://github.com/inhabitedtype/angstrom";
+    description = "OCaml parser combinators built for speed and memory efficiency";
+    license = lib.licenses.bsd3;
+    maintainers = with lib.maintainers; [ sternenseemann ];
+  };
+}
+```
+
+Here is a second example, this time using a source archive generated with `dune-release`. It is a good idea to use this archive when it is available as it will usually contain substituted variables such as a `%%VERSION%%` field. This library does not depend on any other OCaml library and no tests are run after building it.
+
+```nix
+{ lib, fetchurl, buildDunePackage }:
+
+buildDunePackage rec {
+  pname = "wtf8";
+  version = "1.0.2";
+
+  minimalOCamlVersion = "4.02";
+
+  src = fetchurl {
+    url = "https://github.com/flowtype/ocaml-${pname}/releases/download/v${version}/${pname}-v${version}.tbz";
+    hash = "sha256-d5/3KUBAWRj8tntr4RkJ74KWW7wvn/B/m1nx0npnzyc=";
+  };
+
+  meta = {
+    homepage = "https://github.com/flowtype/ocaml-wtf8";
+    description = "WTF-8 is a superset of UTF-8 that allows unpaired surrogates.";
+    license = lib.licenses.mit;
+    maintainers = [ lib.maintainers.eqyiel ];
+  };
+}
+```
+
+Note about `minimalOCamlVersion`: a deprecated version of this argument was
+spelled `minimumOCamlVersion`; setting the old attribute wrongly modifies the
+derivation hash and is therefore inappropriate. As technical debt, currently
+packaged libraries may still use the old spelling: maintainers are invited to
+fix this when updating packages. Massive renaming is strongly discouraged as it
+would be challenging to review, difficult to test, and would cause unnecessary
+rebuilds.
+
+The build will automatically fail if two distinct versions of the same library
+are added to `buildInputs` (which usually happens transitively because of
+`propagatedBuildInputs`). Set `dontDetectOcamlConflicts` to true to disable this
+behavior.
diff --git a/nixpkgs/doc/languages-frameworks/octave.section.md b/nixpkgs/doc/languages-frameworks/octave.section.md
new file mode 100644
index 000000000000..4ad2cb0d5fbf
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/octave.section.md
@@ -0,0 +1,92 @@
+# Octave {#sec-octave}
+
+## Introduction {#ssec-octave-introduction}
+
+Octave is a modular scientific programming language and environment.
+A majority of the packages listed on the Octave packages [website](https://octave.sourceforge.io/packages.php) are packaged in nixpkgs.
+
+## Structure {#ssec-octave-structure}
+
+All Octave add-on packages are available in two ways:
+1. Under the top-level `octave` attribute, as `octave.pkgs`.
+2. As a top-level attribute, `octavePackages`.
+
+## Packaging Octave Packages {#ssec-octave-packaging}
+
+Nixpkgs provides a function `buildOctavePackage`, a generic package builder function for any Octave package that complies with Octave's current packaging format.
+
+All Octave packages are defined in [pkgs/top-level/octave-packages.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/octave-packages.nix) rather than `pkgs/all-packages.nix`.
+Each package is defined in its own file in the [pkgs/development/octave-modules](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/octave-modules) directory.
+Octave packages are made available in `all-packages.nix` through both the attribute `octavePackages` and `octave.pkgs`.
+You can test building an Octave package as follows:
+
+```ShellSession
+$ nix-build -A octavePackages.symbolic
+```
+
+To install it into your user profile, run this command from the root of the repository:
+
+```ShellSession
+$ nix-env -f. -iA octavePackages.symbolic
+```
+
+You can build Octave with packages by using the `withPackages` passed-through function.
+
+```ShellSession
+$ nix-shell -p 'octave.withPackages (ps: with ps; [ symbolic ])'
+```
+
+This will also work in a `shell.nix` file.
+
+```nix
+{ pkgs ? import <nixpkgs> { }}:
+
+pkgs.mkShell {
+  nativeBuildInputs = with pkgs; [
+    (octave.withPackages (opkgs: with opkgs; [ symbolic ]))
+  ];
+}
+```
+
+### `buildOctavePackage` Steps {#sssec-buildOctavePackage-steps}
+
+The `buildOctavePackage` does several things to make sure things work properly.
+
+1. Sets the environment variable `OCTAVE_HISTFILE` to `/dev/null` during package compilation so that the commands run through the Octave interpreter directly are not logged.
+2. Skips the configuration step, because the packages are stored as gzipped tarballs, which Octave itself handles directly.
+3. Changes the hierarchy of the tarball so that only a single directory is at the top-most level of the tarball.
+4. Uses Octave itself to run the `pkg build` command, which unzips the tarball, extracts the necessary files written in Octave, compiles any code written in C++ or Fortran, and places the fully compiled artifact in `$out`.
+
+`buildOctavePackage` is built on top of `stdenv` in a standard way, allowing most things to be customized.
+
+### Handling Dependencies {#sssec-octave-handling-dependencies}
+
+In Octave packages, there are four sets of dependencies that can be specified:
+
+`nativeBuildInputs`
+: Just like other packages, `nativeBuildInputs` is intended for architecture-dependent build-time-only dependencies.
+
+`buildInputs`
+: Like other packages, `buildInputs` is intended for architecture-independent build-time-only dependencies.
+
+`propagatedBuildInputs`
+: Similar to other packages, `propagatedBuildInputs` is intended for packages that are required for both building and running of the package.
+See [Symbolic](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/octave-modules/symbolic/default.nix) for how this works and why it is needed.
+
+`requiredOctavePackages`
+: This is a special dependency attribute that declares other Octave packages this package depends on, so that they are made available simultaneously when loading it in Octave.
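+
+As a rough, hypothetical sketch of how these fit together (the package name, URL
+and hash are illustrative, and `symbolic` stands in for any other Octave
+package):
+
+```nix
+{ lib, buildOctavePackage, fetchurl, symbolic }:
+
+buildOctavePackage rec {
+  pname = "my-octave-package";
+  version = "1.0.0";
+
+  src = fetchurl {
+    url = "https://example.org/${pname}-${version}.tar.gz";
+    hash = lib.fakeHash; # replace with the real hash
+  };
+
+  # other Octave packages made available together with this one
+  requiredOctavePackages = [ symbolic ];
+}
+```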
+
+### Installing Octave Packages {#sssec-installing-octave-packages}
+
+By default, the `buildOctavePackage` function does _not_ install the requested package into Octave for use.
+The function will only build the requested package.
+This is due to Octave maintaining a text-based database about which packages are installed where.
+To this end, when all the requested packages have been built, the Octave package and all its add-on packages are put together into an environment, similar to Python.
+
+1. First, all the Octave binaries are wrapped with the environment variable `OCTAVE_SITE_INITFILE` set to a file in `$out`, which is required for Octave to be able to find the non-standard package database location.
+2. Because of the way `buildEnv` works, all tarballs that are present (which should be all Octave packages to install) should be removed.
+3. The path down to the default install location of Octave packages is recreated so that Nix-operated Octave can install the packages.
+4. Install the packages into the `$out` environment while writing package entries to the database file.
+This database file is unique for each different (according to Nix) environment invocation.
+5. Rewrite the Octave-wide startup file to read from the list of packages installed in that particular environment.
+6. Wrap any programs that are required by the Octave packages so that they work with all the paths defined within the environment.
diff --git a/nixpkgs/doc/languages-frameworks/perl.section.md b/nixpkgs/doc/languages-frameworks/perl.section.md
new file mode 100644
index 000000000000..4ef6d173a178
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/perl.section.md
@@ -0,0 +1,171 @@
+# Perl {#sec-language-perl}
+
+## Running Perl programs on the shell {#ssec-perl-running}
+
+When executing a Perl script, it is possible you get an error such as `./myscript.pl: bad interpreter: /usr/bin/perl: no such file or directory`. This happens when the script expects Perl to be installed at `/usr/bin/perl`, which is not the case when using Perl from nixpkgs. You can fix the script by changing the first line to:
+
+```perl
+#!/usr/bin/env perl
+```
+
+to take the Perl installation from the `PATH` environment variable, or invoke Perl directly with:
+
+```ShellSession
+$ perl ./myscript.pl
+```
+
+When the script is using a Perl library that is not installed globally, you might get an error such as `Can't locate DB_File.pm in @INC (you may need to install the DB_File module)`. In that case, you can use `nix-shell` to start an ad-hoc shell with that library installed, for instance:
+
+```ShellSession
+$ nix-shell -p perl perlPackages.DBFile --run ./myscript.pl
+```
+
+If you are always using the script in places where `nix-shell` is available, you can embed the `nix-shell` invocation in the shebang like this:
+
+```perl
+#!/usr/bin/env nix-shell
+#! nix-shell -i perl -p perl perlPackages.DBFile
+```
+
+## Packaging Perl programs {#ssec-perl-packaging}
+
+Nixpkgs provides a function `buildPerlPackage`, a generic package builder function for any Perl package that has a standard `Makefile.PL`. It’s implemented in [pkgs/development/perl-modules/generic](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/perl-modules/generic).
+
+Perl packages from CPAN are defined in [pkgs/top-level/perl-packages.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/perl-packages.nix) rather than `pkgs/all-packages.nix`. Most Perl packages are so straightforward to build that they are defined here directly, rather than having a separate function for each package called from `perl-packages.nix`. However, more complicated packages should be put in a separate file, typically in `pkgs/development/perl-modules`. Here is an example of the former:
+
+```nix
+{
+  ClassC3 = buildPerlPackage rec {
+    pname = "Class-C3";
+    version = "0.21";
+    src = fetchurl {
+      url = "mirror://cpan/authors/id/F/FL/FLORA/${pname}-${version}.tar.gz";
+      hash = "sha256-/5GE5xHT0uYGOQxroqj6LMU7CtKn2s6vMVoSXxL4iK4=";
+    };
+  };
+}
+```
+
+Note the use of `mirror://cpan/`, and the `pname` and `version` in the URL definition to ensure that the `pname` attribute is consistent with the source that we’re actually downloading. Perl packages are made available in `all-packages.nix` through the variable `perlPackages`. For instance, if you have a package that needs `ClassC3`, you would typically write
+
+```nix
+{
+  foo = import ../path/to/foo.nix {
+    inherit stdenv fetchurl /* ... */;
+    inherit (perlPackages) ClassC3;
+  };
+}
+```
+
+in `all-packages.nix`. You can test building a Perl package as follows:
+
+```ShellSession
+$ nix-build -A perlPackages.ClassC3
+```
+
+To install it with `nix-env` instead: `nix-env -f. -iA perlPackages.ClassC3`.
+
+So what does `buildPerlPackage` do? It does the following:
+
+1. In the configure phase, it calls `perl Makefile.PL` to generate a Makefile. You can set the variable `makeMakerFlags` to pass flags to `Makefile.PL`.
+2. It adds the contents of the `PERL5LIB` environment variable to the `#! .../bin/perl` line of Perl scripts as `-Idir` flags. This ensures that a script can find its dependencies. (This can cause this shebang line to become too long for Darwin to handle; see the note below.)
+3. In the fixup phase, it writes the propagated build inputs (`propagatedBuildInputs`) to the file `$out/nix-support/propagated-user-env-packages`. `nix-env` recursively installs all packages listed in this file when you install a package that has it. This ensures that a Perl package can find its dependencies.
+
+`buildPerlPackage` is built on top of `stdenv`, so everything can be customised in the usual way. For instance, the `BerkeleyDB` module has a `preConfigure` hook to generate a configuration file used by `Makefile.PL`:
+
+```nix
+{ buildPerlPackage, fetchurl, db }:
+
+buildPerlPackage rec {
+  pname = "BerkeleyDB";
+  version = "0.36";
+
+  src = fetchurl {
+    url = "mirror://cpan/authors/id/P/PM/PMQS/${pname}-${version}.tar.gz";
+    hash = "sha256-4Y+HGgGQqcOfdiKcFIyMrWBEccVNVAMDBWZlFTMorh8=";
+  };
+
+  preConfigure = ''
+    echo "LIB = ${db.out}/lib" > config.in
+    echo "INCLUDE = ${db.dev}/include" >> config.in
+  '';
+}
+```
+
+Dependencies on other Perl packages can be specified in the `buildInputs` and `propagatedBuildInputs` attributes. If something is exclusively a build-time dependency, use `buildInputs`; if it’s (also) a runtime dependency, use `propagatedBuildInputs`. For instance, this builds a Perl module that has runtime dependencies on a bunch of other modules:
+
+```nix
+{
+  ClassC3Componentised = buildPerlPackage rec {
+    pname = "Class-C3-Componentised";
+    version = "1.0004";
+    src = fetchurl {
+      url = "mirror://cpan/authors/id/A/AS/ASH/${pname}-${version}.tar.gz";
+      hash = "sha256-ASO9rV/FzJYZ0BH572Fxm2ZrFLMZLFATJng1NuU4FHc=";
+    };
+    propagatedBuildInputs = [
+      ClassC3 ClassInspector TestException MROCompat
+    ];
+  };
+}
+```
+
+On Darwin, if a script has too many `-Idir` flags in its first line (its “shebang line”), it will not run. This can be worked around by calling the `shortenPerlShebang` function from the `postInstall` phase:
+
+```nix
+{ lib, stdenv, buildPerlPackage, fetchurl, shortenPerlShebang }:
+
+{
+  ImageExifTool = buildPerlPackage rec {
+    pname = "Image-ExifTool";
+    version = "12.50";
+
+    src = fetchurl {
+      url = "https://exiftool.org/${pname}-${version}.tar.gz";
+      hash = "sha256-vOhB/FwQMC8PPvdnjDvxRpU6jAZcC6GMQfc0AH4uwKg=";
+    };
+
+    nativeBuildInputs = lib.optional stdenv.isDarwin shortenPerlShebang;
+    postInstall = lib.optionalString stdenv.isDarwin ''
+      shortenPerlShebang $out/bin/exiftool
+    '';
+  };
+}
+```
+
+This will remove the `-I` flags from the shebang line, rewrite them in the `use lib` form, and put them on the next line instead. This function can be given any number of Perl scripts as arguments; it will modify them in-place.
+
+### Generation from CPAN {#ssec-generation-from-CPAN}
+
+Nix expressions for Perl packages can be generated (almost) automatically from CPAN. This is done by the program `nix-generate-from-cpan`, which can be installed as follows:
+
+```ShellSession
+$ nix-env -f "<nixpkgs>" -iA nix-generate-from-cpan
+```
+
+Substitute `<nixpkgs>` with the path of a Nixpkgs clone to use the latest version.
+
+This program takes a Perl module name, looks it up on CPAN, fetches and unpacks the corresponding package, and prints a Nix expression on standard output. For example:
+
+```ShellSession
+$ nix-generate-from-cpan XML::Simple
+  XMLSimple = buildPerlPackage rec {
+    pname = "XML-Simple";
+    version = "2.22";
+    src = fetchurl {
+      url = "mirror://cpan/authors/id/G/GR/GRANTM/XML-Simple-2.22.tar.gz";
+      hash = "sha256-uUUO8i6pZErl1q2ghtxDAPoQW+BQogMOvU79KMGY60k=";
+    };
+    propagatedBuildInputs = [ XMLNamespaceSupport XMLSAX XMLSAXExpat ];
+    meta = {
+      description = "An API for simple XML files";
+      license = with lib.licenses; [ artistic1 gpl1Plus ];
+    };
+  };
+```
+
+The output can be pasted into `pkgs/top-level/perl-packages.nix` or wherever else you need it.
+
+### Cross-compiling modules {#ssec-perl-cross-compilation}
+
+Nixpkgs has experimental support for cross-compiling Perl modules. In many cases, it will just work out of the box, even for modules with native extensions. Sometimes, however, the `Makefile.PL` for a module may (indirectly) import a native module. In that case, you will need to make a stub for that module that will satisfy `Makefile.PL` and install it into `lib/perl5/site_perl/cross_perl/${perl.version}`. See the `postInstall` for `DBI` for an example.
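+
+A minimal sketch of what such a stub might look like; the module name and the stub's contents are purely illustrative, so refer to the actual `DBI` expression in Nixpkgs for a real-world instance:
+
+```nix
+{ buildPerlPackage, fetchurl, perl }:
+
+buildPerlPackage rec {
+  pname = "SomeNative";
+  version = "1.0";
+
+  src = fetchurl { /* ... */ };
+
+  postInstall = ''
+    # Install a do-nothing stub so that the Makefile.PL of dependent modules
+    # can be evaluated by the build platform's perl when cross-compiling.
+    mkdir -p $out/lib/perl5/site_perl/cross_perl/${perl.version}
+    cat > $out/lib/perl5/site_perl/cross_perl/${perl.version}/SomeNative.pm <<EOF
+    package SomeNative;
+    sub new { bless {}, shift }
+    1;
+    EOF
+  '';
+}
+```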
diff --git a/nixpkgs/doc/languages-frameworks/php.section.md b/nixpkgs/doc/languages-frameworks/php.section.md
new file mode 100644
index 000000000000..c1493588a606
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/php.section.md
@@ -0,0 +1,293 @@
+# PHP {#sec-php}
+
+## User Guide {#ssec-php-user-guide}
+
+### Overview {#ssec-php-user-guide-overview}
+
+Several versions of PHP are available on Nix, each of which has a
+wide variety of extensions and libraries available.
+
+The different versions of PHP that nixpkgs provides are located under
+attributes named based on major and minor version number; e.g.,
+`php81` is PHP 8.1.
+
+Only versions of PHP that are supported by upstream for the entirety
+of a given NixOS release will be included in that release of
+NixOS. See [PHP Supported
+Versions](https://www.php.net/supported-versions.php).
+
+The attribute `php` refers to the version of PHP considered most
+stable and thoroughly tested in nixpkgs for any given release of
+NixOS - not necessarily the latest major release from upstream.
+
+All available PHP attributes are wrappers around their respective
+binary PHP package and provide commonly used extensions this way. The
+real PHP 8.1 package, i.e. the unwrapped one, is available as
+`php81.unwrapped`; see the next section for more details.
+
+Interactive tools built on PHP are put in `php.packages`; composer is
+for example available at `php.packages.composer`.
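+
+For instance, a temporary shell with Composer on `PATH` (an illustrative invocation):
+
+```sh
+nix-shell -p php.packages.composer --run 'composer --version'
+```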
+
+Most extensions that come with PHP, as well as some popular
+third-party ones, are available in `php.extensions`; for example, the
+opcache extension shipped with PHP is available at
+`php.extensions.opcache` and the third-party ImageMagick extension at
+`php.extensions.imagick`.
+
+### Installing PHP with extensions {#ssec-php-user-guide-installing-with-extensions}
+
+A PHP package with specific extensions enabled can be built using
+`php.withExtensions`. This is a function which accepts an anonymous
+function as its only argument; the function should accept two named
+parameters: `enabled` - a list of currently enabled extensions and
+`all` - the set of all extensions, and return a list of wanted
+extensions. For example, a PHP package with all default extensions and
+ImageMagick enabled:
+
+```nix
+php.withExtensions ({ enabled, all }:
+  enabled ++ [ all.imagick ])
+```
+
+To exclude some, but not all, of the default extensions, you can
+filter the `enabled` list like this:
+
+```nix
+php.withExtensions ({ enabled, all }:
+  (lib.filter (e: e != php.extensions.opcache) enabled)
+  ++ [ all.imagick ])
+```
+
+To build your list of extensions from the ground up, you can
+ignore `enabled`:
+
+```nix
+php.withExtensions ({ all, ... }: with all; [ imagick opcache ])
+```
+
+`php.withExtensions` provides extensions by wrapping a minimal php
+base package, providing a `php.ini` file listing all extensions to be
+loaded. You can access this package through the `php.unwrapped`
+attribute; useful if you, for example, need access to the `dev`
+output. The generated `php.ini` file can be accessed through the
+`php.phpIni` attribute.
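+
+For example, both attributes can be read straight off the default wrapper (a
+small illustration; which outputs the unwrapped package exposes depends on the
+PHP derivation):
+
+```nix
+{
+  iniFile = php.phpIni;          # the generated php.ini
+  devOutput = php.unwrapped.dev; # e.g. headers from the unwrapped package
+}
+```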
+
+If you want a PHP build with extra configuration in the `php.ini`
+file, you can use `php.buildEnv`. This function takes two named and
+optional parameters: `extensions` and `extraConfig`. `extensions`
+takes an extension specification equivalent to that of
+`php.withExtensions`, `extraConfig` a string of additional `php.ini`
+configuration parameters. For example, a PHP package with the opcache
+and ImageMagick extensions enabled, and `memory_limit` set to `256M`:
+
+```nix
+php.buildEnv {
+  extensions = { all, ... }: with all; [ imagick opcache ];
+  extraConfig = "memory_limit=256M";
+}
+```
+
+#### Example setup for `phpfpm` {#ssec-php-user-guide-installing-with-extensions-phpfpm}
+
+You can use the previous examples in a `phpfpm` pool called `foo` as
+follows:
+
+```nix
+let
+  myPhp = php.withExtensions ({ all, ... }: with all; [ imagick opcache ]);
+in {
+  services.phpfpm.pools."foo".phpPackage = myPhp;
+}
+```
+
+```nix
+let
+  myPhp = php.buildEnv {
+    extensions = { all, ... }: with all; [ imagick opcache ];
+    extraConfig = "memory_limit=256M";
+  };
+in {
+  services.phpfpm.pools."foo".phpPackage = myPhp;
+}
+```
+
+#### Example usage with `nix-shell` {#ssec-php-user-guide-installing-with-extensions-nix-shell}
+
+This brings up a temporary environment that contains a PHP interpreter
+with the extensions `imagick` and `opcache` enabled:
+
+```sh
+nix-shell -p 'php.withExtensions ({ all, ... }: with all; [ imagick opcache ])'
+```
+
+### Installing PHP packages with extensions {#ssec-php-user-guide-installing-packages-with-extensions}
+
+All interactive tools use the PHP package you get them from, so all
+packages at `php.packages.*` use the `php` package with its default
+extensions. Sometimes this default set of extensions isn't enough and
+you may want to extend it. A common case of this is the `composer`
+package: a project may depend on certain extensions and `composer`
+won't work with that project unless those extensions are loaded.
+
+Example of building `composer` with additional extensions:
+
+```nix
+(php.withExtensions ({ all, enabled }:
+  enabled ++ (with all; [ imagick redis ]))
+).packages.composer
+```
+
+### Overriding PHP packages {#ssec-php-user-guide-overriding-packages}
+
+`php-packages.nix` forms a scope, allowing us to override the packages defined
+within. For example, to apply a patch to a `mysqlnd` extension, you can
+pass an overlay-style function to `php`’s `packageOverrides` argument:
+
+```nix
+php.override {
+  packageOverrides = final: prev: {
+    extensions = prev.extensions // {
+      mysqlnd = prev.extensions.mysqlnd.overrideAttrs (attrs: {
+        patches = attrs.patches or [] ++ [
+          # ...
+        ];
+      });
+    };
+  };
+}
+```
+
+### Building PHP projects {#ssec-building-php-projects}
+
+With [Composer](https://getcomposer.org/), you can effectively build PHP
+projects by streamlining dependency management. As the de-facto standard
+dependency manager for PHP, Composer enables you to declare and manage the
+libraries your project relies on, ensuring a more organized and efficient
+development process.
+
+Composer is not a package manager in the same sense as `Yum` or `Apt` are. Yes,
+it deals with "packages" or libraries, but it manages them on a per-project
+basis, installing them in a directory (e.g. `vendor`) inside your project. By
+default, it does not install anything globally. This idea is not new and
+Composer is strongly inspired by node's `npm` and ruby's `bundler`.
+
+Currently, there is no other PHP tool that offers the same functionality as
+Composer. Consequently, incorporating a helper in Nix to facilitate building
+such applications is a logical choice.
+
+In a Composer project, dependencies are defined in a `composer.json` file,
+while their specific versions are locked in a `composer.lock` file. Some
+Composer-based projects opt to include this `composer.lock` file in their source
+code, while others choose not to.
+
+In Nix, there are multiple approaches to building a Composer-based project.
+
+One such method is the `php.buildComposerProject` helper function, which serves
+as a wrapper around `mkDerivation`.
+
+Using this function, you can build a PHP project that includes both a
+`composer.json` and `composer.lock` file. If the project specifies binaries
+using the `bin` attribute in `composer.json`, these binaries will be
+automatically linked and made accessible in the derivation. In this context,
+"binaries" refer to PHP scripts that are intended to be executable.
+
+To use the helper effectively, add the `vendorHash` attribute, which
+enables the wrapper to handle the heavy lifting.
+
+Internally, the helper operates in three stages:
+
+1. It constructs a `composerRepository` attribute derivation by creating a
+   composer repository on the filesystem containing dependencies specified in
+   `composer.json`. This process uses the function
+   `php.mkComposerRepository` which in turn uses the
+   `php.composerHooks.composerRepositoryHook` hook. Internally this function uses
+   a custom
+   [Composer plugin](https://github.com/nix-community/composer-local-repo-plugin) to
+   generate the repository.
+2. The resulting `composerRepository` derivation is then used by the
+   `php.composerHooks.composerInstallHook` hook, which is responsible for
+   creating the final `vendor` directory.
+3. Any "binary" specified in the `composer.json` are linked and made accessible
+   in the derivation.
+
+As the autoloader optimization can be activated directly within the
+`composer.json` file, we do not enable any autoloader optimization flags.
+
+To customize the PHP version, you can specify the `php` attribute. Similarly, if
+you wish to modify the Composer version, use the `composer` attribute. It is
+important to note that both attributes should be of the `derivation` type.
+
+Here's a working example using `php.buildComposerProject`:
+
+```nix
+{ php, fetchFromGitHub }:
+
+php.buildComposerProject (finalAttrs: {
+  pname = "php-app";
+  version = "1.0.0";
+
+  src = fetchFromGitHub {
+    owner = "git-owner";
+    repo = "git-repo";
+    rev = finalAttrs.version;
+    hash = "sha256-VcQRSss2dssfkJ+iUb5qT+FJ10GHiFDzySigcmuVI+8=";
+  };
+
+  # PHP version containing the `ast` extension enabled
+  php = php.buildEnv {
+    extensions = ({ enabled, all }: enabled ++ (with all; [
+      ast
+    ]));
+  };
+
+  # The composer vendor hash
+  vendorHash = "sha256-86s/F+/5cBAwBqZ2yaGRM5rTGLmou5//aLRK5SA0WiQ=";
+
+  # If the composer.lock file is missing from the repository, add it:
+  # composerLock = ./path/to/composer.lock;
+})
+```
+
+In case the file `composer.lock` is missing from the repository, it is possible
+to specify it using the `composerLock` attribute.
+
+The other approach is to use the functions and hooks individually. This has
+the advantage of making it easy to build a PHP library within another
+derivation when necessary.
+
+Here's a working code example to build a PHP library using `mkDerivation` and
+separate functions and hooks:
+
+```nix
+{ stdenvNoCC, fetchFromGitHub, php }:
+
+stdenvNoCC.mkDerivation (finalAttrs:
+let
+  src = fetchFromGitHub {
+    owner = "git-owner";
+    repo = "git-repo";
+    rev = finalAttrs.version;
+    hash = "sha256-VcQRSss2dssfkJ+iUb5qT+FJ10GHiFDzySigcmuVI+8=";
+  };
+in {
+  inherit src;
+  pname = "php-app";
+  version = "1.0.0";
+
+  buildInputs = [ php ];
+
+  nativeBuildInputs = [
+    php.packages.composer
+    # This hook will use the attribute `composerRepository`
+    php.composerHooks.composerInstallHook
+  ];
+
+  composerRepository = php.mkComposerRepository {
+    inherit (finalAttrs) src;
+    # Specifying a custom composer.lock since it is not present in the sources.
+    composerLock = ./composer.lock;
+    # The composer vendor hash
+    vendorHash = "sha256-86s/F+/5cBAwBqZ2yaGRM5rTGLmou5//aLRK5SA0WiQ=";
+  };
+})
+```
diff --git a/nixpkgs/doc/languages-frameworks/pkg-config.section.md b/nixpkgs/doc/languages-frameworks/pkg-config.section.md
new file mode 100644
index 000000000000..e5a2b85b6576
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/pkg-config.section.md
@@ -0,0 +1,51 @@
+# pkg-config {#sec-pkg-config}
+
+*pkg-config* is a unified interface for declaring and querying built C/C++ libraries.
+
+Nixpkgs provides a couple of facilities for working with this tool.
+
+## Writing packages providing pkg-config modules {#pkg-config-writing-packages}
+
+Packages should set `meta.pkgConfigModules` with the list of package config modules they provide.
+They should also use `testers.testMetaPkgConfig` to check that the final built package matches that list.
+Additionally, the [`validatePkgConfig` setup hook](https://nixos.org/manual/nixpkgs/stable/#validatepkgconfig) will do extra checks on to-be-installed pkg-config modules.
+
+A good example of all these things is zlib:
+
+```nix
+{ stdenv, pkg-config, validatePkgConfig, testers, ... }:
+
+stdenv.mkDerivation (finalAttrs: {
+  /* ... */
+
+  nativeBuildInputs = [ pkg-config validatePkgConfig ];
+
+  passthru.tests.pkg-config = testers.testMetaPkgConfig finalAttrs.finalPackage;
+
+  meta = {
+    /* ... */
+    pkgConfigModules = [ "zlib" ];
+  };
+})
+```
+
+## Accessing packages via pkg-config module name {#sec-pkg-config-usage}
+
+### Within Nixpkgs {#sec-pkg-config-usage-internal}
+
+A [setup hook](#setup-hook-pkg-config) is bundled in the `pkg-config` package to bring a derivation's declared build inputs into the environment.
+This will populate environment variables like `PKG_CONFIG_PATH`, `PKG_CONFIG_PATH_FOR_BUILD`, and `PKG_CONFIG_PATH_HOST` based on:
+
+ - how `pkg-config` itself is depended upon
+
+ - how other dependencies are depended upon
+
+For more details see the section on [specifying dependencies in general](#ssec-stdenv-dependencies).
+
+Normal pkg-config commands to look up dependencies by name will then work with those environment variables defined by the hook.
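+
+For example, a package that puts `pkg-config` in `nativeBuildInputs` and `zlib` in `buildInputs` can query the module by name during its build (a minimal sketch; the package name and the phase contents are only illustrative):
+
+```nix
+{ stdenv, pkg-config, zlib }:
+
+stdenv.mkDerivation {
+  pname = "pkg-config-demo";
+  version = "0.0.1";
+  dontUnpack = true;
+
+  nativeBuildInputs = [ pkg-config ];
+  buildInputs = [ zlib ];
+
+  # The setup hook has populated PKG_CONFIG_PATH, so the module is found by name.
+  installPhase = ''
+    pkg-config --cflags --libs zlib > $out
+  '';
+}
+```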
+
+### Externally {#sec-pkg-config-usage-external}
+
+The `defaultPkgConfigPackages` package set is a set of aliases, named after the modules they provide.
+This is meant to be used by language-to-nix integrations.
+Hand-written packages should use the normal Nixpkgs attribute name instead.
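+
+For example, such an integration can resolve a pkg-config module name to a package without knowing its Nixpkgs attribute (illustrative; the module name is just an example):
+
+```nix
+# Resolves to the package providing the "zlib" pkg-config module
+# (typically pkgs.zlib).
+pkgs.defaultPkgConfigPackages."zlib"
+```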
diff --git a/nixpkgs/doc/languages-frameworks/python.section.md b/nixpkgs/doc/languages-frameworks/python.section.md
new file mode 100644
index 000000000000..f325af0641f6
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/python.section.md
@@ -0,0 +1,2151 @@
+# Python {#python}
+
+## Reference {#reference}
+
+### Interpreters {#interpreters}
+
+| Package    | Aliases         | Interpreter  |
+|------------|-----------------|--------------|
+| python27   | python2, python | CPython 2.7  |
+| python39   |                 | CPython 3.9  |
+| python310  |                 | CPython 3.10 |
+| python311  | python3         | CPython 3.11 |
+| python312  |                 | CPython 3.12 |
+| python313  |                 | CPython 3.13 |
+| pypy27     | pypy2, pypy     | PyPy 2.7     |
+| pypy39     | pypy3           | PyPy 3.9     |
+
+The Nix expressions for the interpreters can be found in
+`pkgs/development/interpreters/python`.
+
+All packages that depend on any Python interpreter get
+`out/{python.sitePackages}` appended to `$PYTHONPATH`, if such a
+directory exists.
+
+#### Missing `tkinter` module standard library {#missing-tkinter-module-standard-library}
+
+To reduce closure size, the `Tkinter`/`tkinter` module is available as a separate package, `pythonPackages.tkinter`.
+
+#### Attributes on interpreters packages {#attributes-on-interpreters-packages}
+
+Each interpreter has the following attributes:
+
+- `libPrefix`. Name of the folder in `${python}/lib/` for the corresponding interpreter.
+- `interpreter`. Alias for `${python}/bin/${executable}`.
+- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See [](#python.buildenv-function) for usage and documentation.
+- `withPackages`. Simpler interface to `buildEnv`. See [](#python.withpackages-function) for usage and documentation.
+- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
+- `executable`. Name of the interpreter executable, e.g. `python3.10`.
+- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
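+
+For instance, a few of these attributes for the default Python 3 interpreter (the exact values are illustrative and depend on the interpreter version):
+
+```nix
+{
+  prefix = python3.libPrefix;   # e.g. "python3.11"
+  site = python3.sitePackages;  # e.g. "lib/python3.11/site-packages"
+  exe = python3.executable;     # e.g. "python3.11"
+}
+```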
+
+### Building packages and applications {#building-packages-and-applications}
+
+Python libraries and applications that use `setuptools` or
+`distutils` are typically built with the [`buildPythonPackage`](#buildpythonpackage-function) and
+[`buildPythonApplication`](#buildpythonapplication-function) functions, respectively. Both functions also support installing a `wheel`.
+
+All Python packages reside in `pkgs/top-level/python-packages.nix` and all
+applications elsewhere. If a package is used as both a library and an
+application, it should be in `pkgs/top-level/python-packages.nix`,
+since only those packages are made available for all interpreter versions. The
+preferred location for library expressions is in
+`pkgs/development/python-modules`. It is important that these packages are
+called from `pkgs/top-level/python-packages.nix` and not elsewhere, to guarantee
+the right version of the package is built.
+
+Based on the packages defined in `pkgs/top-level/python-packages.nix` an
+attribute set is created for each available Python interpreter. The available
+sets are
+
+* `pkgs.python27Packages`
+* `pkgs.python3Packages`
+* `pkgs.python39Packages`
+* `pkgs.python310Packages`
+* `pkgs.python311Packages`
+* `pkgs.python312Packages`
+* `pkgs.python313Packages`
+* `pkgs.pypyPackages`
+
+and the aliases
+
+* `pkgs.python2Packages` pointing to `pkgs.python27Packages`
+* `pkgs.python3Packages` pointing to `pkgs.python311Packages`
+* `pkgs.pythonPackages` pointing to `pkgs.python2Packages`
+
+#### `buildPythonPackage` function {#buildpythonpackage-function}
+
+The `buildPythonPackage` function has its name binding in
+`pkgs/development/interpreters/python/python-packages-base.nix` and is
+implemented in `pkgs/development/interpreters/python/mk-python-derivation.nix`
+using setup hooks.
+
+The following is an example:
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+
+# build-system
+, setuptools-scm
+
+# dependencies
+, attrs
+, pluggy
+, py
+, setuptools
+, six
+
+# tests
+, hypothesis
+}:
+
+buildPythonPackage rec {
+  pname = "pytest";
+  version = "3.3.1";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-z4Q23FnYaVNG/NOrKW3kZCXsqwDWQJbOvnn7Ueyy65M=";
+  };
+
+  postPatch = ''
+    # don't test bash builtins
+    rm testing/test_argcomplete.py
+  '';
+
+  build-system = [
+    setuptools-scm
+  ];
+
+  dependencies = [
+    attrs
+    py
+    setuptools
+    six
+    pluggy
+  ];
+
+  nativeCheckInputs = [
+    hypothesis
+  ];
+
+  meta = {
+    changelog = "https://github.com/pytest-dev/pytest/releases/tag/${version}";
+    description = "Framework for writing tests";
+    homepage = "https://github.com/pytest-dev/pytest";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [ domenkozar lovek323 madjar lsix ];
+  };
+}
+```
+
+The `buildPythonPackage` mainly does four things:
+
+* In the [`buildPhase`](#build-phase), it calls `${python.pythonOnBuildForHost.interpreter} setup.py bdist_wheel` to
+  build a wheel binary zipfile.
+* In the [`installPhase`](#ssec-install-phase), it installs the wheel file using `pip install *.whl`.
+* In the [`postFixup`](#var-stdenv-postFixup) phase, the `wrapPythonPrograms` bash function is called to
+  wrap all programs in the `$out/bin/*` directory to include `$PATH`
+  environment variable and add dependent libraries to script's `sys.path`.
+* In the [`installCheck`](#ssec-installCheck-phase) phase, `${python.interpreter} setup.py test` is run.
+
+By default tests are run because [`doCheck = true`](#var-stdenv-doCheck). Test dependencies, such
+as the test runner, should be added to [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs).
+
+By default, `meta.platforms` is set to the same value as the
+interpreter's, unless overridden.
+
+##### `buildPythonPackage` parameters {#buildpythonpackage-parameters}
+
+All parameters from [`stdenv.mkDerivation`](#sec-using-stdenv) function are still supported. The
+following are specific to `buildPythonPackage`:
+
+* `catchConflicts ? true`: If `true`, abort the package build if a package name
+  appears more than once in the dependency tree. Default is `true`.
+* `disabled ? false`: If `true`, package is not built for the particular Python
+  interpreter version.
+* `dontWrapPythonPrograms ? false`: Skip wrapping of Python programs.
+* `permitUserSite ? false`: Skip setting the `PYTHONNOUSERSITE` environment
+  variable in wrapped programs.
+* `pyproject`: Whether the pyproject format should be used. When set to `true`,
+  `pypaBuildHook` will be used, and you can add the required build dependencies
+  from `build-system.requires` to `build-system`. Note that the pyproject
+  format falls back to using `setuptools`, so you can use `pyproject = true`
+  even if the package only has a `setup.py`. When set to `false`, you can
+  use the existing [hooks](#setup-hooks) or provide your own logic to build the
+  package. This can be useful for packages that don't support the pyproject
+  format. When unset, the legacy `setuptools` hooks are used for backwards
+  compatibility.
+* `makeWrapperArgs ? []`: A list of strings. Arguments to be passed to
+  [`makeWrapper`](#fun-makeWrapper), which wraps generated binaries. By default, the arguments to
+  [`makeWrapper`](#fun-makeWrapper) set `PATH` and `PYTHONPATH` environment variables before calling
+  the binary. Additional arguments here can allow a developer to set environment
+  variables which will be available when the binary is run. For example,
+  `makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
+* `namePrefix`: Prepends text to `${name}` parameter. In case of libraries, this
+  defaults to `"python3.8-"` for Python 3.8, etc., and in case of applications to `""`.
+* `pipInstallFlags ? []`: A list of strings. Arguments to be passed to `pip
+  install`. To pass options to `python setup.py install`, use
+  `--install-option`. E.g., `pipInstallFlags=["--install-option='--cpp_implementation'"]`.
+* `pipBuildFlags ? []`: A list of strings. Arguments to be passed to `pip wheel`.
+* `pypaBuildFlags ? []`: A list of strings. Arguments to be passed to `python -m build --wheel`.
+* `pythonPath ? []`: List of packages to be added into `$PYTHONPATH`. Packages
+  in `pythonPath` are not propagated (contrary to [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs)).
+* `preShellHook`: Hook to execute commands before `shellHook`.
+* `postShellHook`: Hook to execute commands after `shellHook`.
+* `removeBinByteCode ? true`: Remove bytecode from `/bin`. Bytecode is only
+  created when the filenames end with `.py`.
+* `setupPyGlobalFlags ? []`: List of flags passed to `setup.py` command.
+* `setupPyBuildFlags ? []`: List of flags passed to `setup.py build_ext` command.
+
+The [`stdenv.mkDerivation`](#sec-using-stdenv) function accepts various parameters for describing
+build inputs (see "Specifying dependencies"). The following are of special
+interest for Python packages, either because these are primarily used, or
+because their behaviour is different:
+
+* `nativeBuildInputs ? []`: Build-time only dependencies. Typically executables.
+* `build-system ? []`: Build-time only Python dependencies. Items listed in `build-system.requires`/`setup_requires`.
+* `buildInputs ? []`: Build and/or run-time dependencies that need to be
+  compiled for the host machine. Typically non-Python libraries which are being
+  linked.
+* `nativeCheckInputs ? []`: Dependencies needed for running the [`checkPhase`](#ssec-check-phase). These
+  are added to [`nativeBuildInputs`](#var-stdenv-nativeBuildInputs) when [`doCheck = true`](#var-stdenv-doCheck). Items listed in
+  `tests_require` go here.
+* `dependencies ? []`: Aside from propagating dependencies,
+  `buildPythonPackage` also injects code into and wraps executables with the
+  paths included in this list. Items listed in `install_requires` go here.
+* `optional-dependencies ? { }`: Optional feature-flagged dependencies. Items listed in `extras_require` go here.
+
+##### Overriding Python packages {#overriding-python-packages}
+
+The `buildPythonPackage` function has a `overridePythonAttrs` method that can be
+used to override the package. In the following example we create an environment
+where we have the `blaze` package using an older version of `pandas`. We
+override first the Python interpreter and pass `packageOverrides` which contains
+the overrides for packages in the package set.
+
+```nix
+with import <nixpkgs> {};
+
+(let
+  python = let
+    packageOverrides = self: super: {
+      pandas = super.pandas.overridePythonAttrs(old: rec {
+        version = "0.19.1";
+        src =  fetchPypi {
+          pname = "pandas";
+          inherit version;
+          hash = "sha256-JQn+rtpy/OA2deLszSKEuxyttqBzcAil50H+JDHUdCE=";
+        };
+      });
+    };
+  in pkgs.python3.override {inherit packageOverrides; self = python;};
+
+in python.withPackages(ps: [ ps.blaze ])).env
+```
+
+The next example shows a non-trivial overriding of the `blas` implementation to
+be used throughout all of the Python package set:
+
+```nix
+{
+  python3MyBlas = pkgs.python3.override {
+    packageOverrides = self: super: {
+      # We need toPythonModule for the package set to evaluate this
+      blas = super.toPythonModule(super.pkgs.blas.override {
+        blasProvider = super.pkgs.mkl;
+      });
+      lapack = super.toPythonModule(super.pkgs.lapack.override {
+        lapackProvider = super.pkgs.mkl;
+      });
+    };
+  };
+}
+```
+
+This is particularly useful for numpy and scipy users who want to gain speed with other blas implementations.
+Note that using `scipy = super.scipy.override { blas = super.pkgs.mkl; };` will likely result in
+compilation issues, because scipy's dependencies also need to use the same blas implementation.
+
+#### `buildPythonApplication` function {#buildpythonapplication-function}
+
+The [`buildPythonApplication`](#buildpythonapplication-function) function is practically the same as
+[`buildPythonPackage`](#buildpythonpackage-function). The main purpose of this function is to build a Python
+package where one is interested only in the executables, and not importable
+modules. For that reason, when adding this package to a [`python.buildEnv`](#python.buildenv-function), the
+modules won't be made available.
+
+Another difference is that [`buildPythonPackage`](#buildpythonpackage-function) by default prefixes the names of
+the packages with the version of the interpreter. Because this is irrelevant for
+applications, the prefix is omitted.
+
+When packaging a Python application with [`buildPythonApplication`](#buildpythonapplication-function), it should be
+called with `callPackage` and passed `python3` or `python3Packages` (possibly
+specifying an interpreter version), like this:
+
+```nix
+{ lib
+, python3Packages
+, fetchPypi
+}:
+
+python3Packages.buildPythonApplication rec {
+  pname = "luigi";
+  version = "2.7.9";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash  = "sha256-Pe229rT0aHwA98s+nTHQMEFKZPo/yw6sot8MivFDvAw=";
+  };
+
+  build-system = with python3Packages; [
+    setuptools
+    wheel
+  ];
+
+  dependencies = with python3Packages; [
+    tornado
+    python-daemon
+  ];
+
+  meta = {
+    # ...
+  };
+}
+```
+
+This is then added to `all-packages.nix` just as any other application would be.
+
+```nix
+{
+  luigi = callPackage ../applications/networking/cluster/luigi { };
+}
+```
+
+Since the package is an application, a consumer doesn't need to care about
+Python versions or modules, which is why they don't go in `python3Packages`.
+
+#### `toPythonApplication` function {#topythonapplication-function}
+
+A distinction is made between applications and libraries; however, sometimes a
+package is used as both. In this case the package is added as a library to
+`python-packages.nix` and as an application to `all-packages.nix`. To reduce
+duplication the `toPythonApplication` can be used to convert a library to an
+application.
+
+The Nix expression shall use [`buildPythonPackage`](#buildpythonpackage-function) and be called from
+`python-packages.nix`. A reference shall be created from `all-packages.nix` to
+the attribute in `python-packages.nix`, and the `toPythonApplication` shall be
+applied to the reference:
+
+```nix
+{
+  youtube-dl = with python3Packages; toPythonApplication youtube-dl;
+}
+```
+
+#### `toPythonModule` function {#topythonmodule-function}
+
+In some cases, such as bindings, a package is created using
+[`stdenv.mkDerivation`](#sec-using-stdenv) and added as attribute in `all-packages.nix`. The Python
+bindings should be made available from `python-packages.nix`. The
+`toPythonModule` function takes a derivation and makes certain Python-specific
+modifications.
+
+```nix
+{
+  opencv = toPythonModule (pkgs.opencv.override {
+    enablePython = true;
+    pythonPackages = self;
+  });
+}
+```
+
+Do pay attention to passing in the right Python version!
+
+#### `python.buildEnv` function {#python.buildenv-function}
+
+Python environments can be created using the low-level `pkgs.buildEnv` function.
+This example shows how to create an environment that has the Pyramid Web Framework.
+Saving the following as `default.nix`
+
+```nix
+with import <nixpkgs> {};
+
+python3.buildEnv.override {
+  extraLibs = [ python3Packages.pyramid ];
+  ignoreCollisions = true;
+}
+```
+
+and running `nix-build` will create a store path along the lines of
+
+```
+/nix/store/...-python3-env
+```
+
+with wrapped binaries in `bin/`.
+
+You can also use the `env` attribute to create local environments with needed
+packages installed. This is somewhat comparable to `virtualenv`. For example,
+running `nix-shell` with the following `shell.nix`
+
+```nix
+with import <nixpkgs> {};
+
+(python3.buildEnv.override {
+  extraLibs = with python3Packages; [
+    numpy
+    requests
+  ];
+}).env
+```
+
+will drop you into a shell where Python will have the
+specified packages in its path.
+
+##### `python.buildEnv` arguments {#python.buildenv-arguments}
+
+
+* `extraLibs`: List of packages installed inside the environment.
+* `postBuild`: Shell command executed after the build of environment.
+* `ignoreCollisions`: Ignore file collisions inside the environment (default is `false`).
+* `permitUserSite`: Skip setting the `PYTHONNOUSERSITE` environment variable in
+  wrapped binaries in the environment.
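+
+For example, a small environment that exercises `ignoreCollisions` and `postBuild` (a minimal sketch; the chosen package and the command run in `postBuild` are only illustrative):
+
+```nix
+with import <nixpkgs> {};
+
+python3.buildEnv.override {
+  extraLibs = [ python3Packages.requests ];
+  ignoreCollisions = true;
+  postBuild = ''
+    # Runs after the environment has been assembled; $out is the environment.
+    ls $out/bin
+  '';
+}
+```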
+
+#### `python.withPackages` function {#python.withpackages-function}
+
+The [`python.withPackages`](#python.withpackages-function) function provides a simpler interface to the [`python.buildEnv`](#python.buildenv-function) functionality.
+It takes a function as an argument that is passed the set of python packages and returns the list
+of the packages to be included in the environment. Using the [`withPackages`](#python.withpackages-function) function, the previous
+example for the Pyramid Web Framework environment can be written like this:
+
+```nix
+with import <nixpkgs> {};
+
+python.withPackages (ps: [ ps.pyramid ])
+```
+
+[`withPackages`](#python.withpackages-function) passes the correct package set for the specific interpreter
+version as an argument to the function. In the above example, `ps` equals
+`pythonPackages`. But you can also easily switch to using python3:
+
+```nix
+with import <nixpkgs> {};
+
+python3.withPackages (ps: [ ps.pyramid ])
+```
+
+Now, `ps` is set to `python3Packages`, matching the version of the interpreter.
+
+As [`python.withPackages`](#python.withpackages-function) uses [`python.buildEnv`](#python.buildenv-function) under the hood, it also
+supports the `env` attribute. The `shell.nix` file from the previous section can
+thus be also written like this:
+
+```nix
+with import <nixpkgs> {};
+
+(python3.withPackages (ps: with ps; [
+  numpy
+  requests
+])).env
+```
+
+In contrast to [`python.buildEnv`](#python.buildenv-function), [`python.withPackages`](#python.withpackages-function) does not support the
+more advanced options such as `ignoreCollisions = true` or `postBuild`. If you
+need them, you have to use [`python.buildEnv`](#python.buildenv-function).
+
+Python 2 namespace packages may provide `__init__.py` that collide. In that case
+[`python.buildEnv`](#python.buildenv-function) should be used with `ignoreCollisions = true`.
+
+#### Setup hooks {#setup-hooks}
+
+The following are setup hooks specifically for Python packages. Most of these
+are used in [`buildPythonPackage`](#buildpythonpackage-function).
+
+- `eggUnpackHook` to move an egg to the correct folder so it can be installed
+  with the `eggInstallHook`.
+- `eggBuildHook` to skip building for eggs.
+- `eggInstallHook` to install eggs.
+- `pipBuildHook` to build a wheel using `pip` and PEP 517. Note a build system
+  (e.g. `setuptools` or `flit`) should still be added as `build-system`.
+- `pypaBuildHook` to build a wheel using
+  [`pypa/build`](https://pypa-build.readthedocs.io/en/latest/index.html) and
+  PEP 517/518. Note a build system (e.g. `setuptools` or `flit`) should still
+  be added as `build-system`.
+- `pipInstallHook` to install wheels.
+- `pytestCheckHook` to run tests with `pytest`. See [example usage](#using-pytestcheckhook).
+- `pythonCatchConflictsHook` to fail if the package depends on two different versions of the same dependency.
+- `pythonImportsCheckHook` to check whether importing the listed modules works.
+- `pythonRelaxDepsHook` will relax Python dependencies restrictions for the package.
+  See [example usage](#using-pythonrelaxdepshook).
+- `pythonRemoveBinBytecode` to remove bytecode from the `/bin` folder.
+- `setuptoolsBuildHook` to build a wheel using `setuptools`.
+- `setuptoolsCheckHook` to run tests with `python setup.py test`.
+- `sphinxHook` to build documentation and manpages using Sphinx.
+- `venvShellHook` to source a Python 3 `venv` at the `venvDir` location. A
+  `venv` is created if it does not yet exist. `postVenvCreation` can be used
+  to run commands only after the venv is first created.
+- `wheelUnpackHook` to move a wheel to the correct folder so it can be installed
+  with the `pipInstallHook`.
+- `unittestCheckHook` will run tests with `python -m unittest discover`. See [example usage](#using-unittestcheckhook).
+
+### Development mode {#development-mode}
+
+Development or editable mode is supported. To develop Python packages,
+[`buildPythonPackage`](#buildpythonpackage-function) has additional logic inside `shellPhase` to run `pip
+install -e . --prefix $TMPDIR/` for the package.
+
+Warning: `shellPhase` is executed only if `setup.py` exists.
+
+Given a `default.nix`:
+
+```nix
+with import <nixpkgs> {};
+
+python3Packages.buildPythonPackage {
+  name = "myproject";
+  buildInputs = with python3Packages; [ pyramid ];
+
+  src = ./.;
+}
+```
+
+Running `nix-shell` with no arguments should give you the environment in which
+the package would be built with `nix-build`.
+
+Shortcut to setup environments with C headers/libraries and Python packages:
+
+```shell
+nix-shell -p python3Packages.pyramid zlib libjpeg git
+```
+
+::: {.note}
+There is a boolean value `lib.inNixShell` set to `true` if nix-shell is invoked.
+:::
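+
+A minimal sketch of how this can be used, assuming you only want extra development tools when the expression is evaluated by `nix-shell` (the chosen tool is illustrative):
+
+```nix
+with import <nixpkgs> {};
+
+python3Packages.buildPythonPackage {
+  name = "myproject";
+  src = ./.;
+
+  buildInputs = with python3Packages; [ pyramid ];
+
+  # Only pulled in inside nix-shell, not in a regular nix-build.
+  nativeBuildInputs = lib.optionals lib.inNixShell [ python3Packages.ipython ];
+}
+```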
+
+## User Guide {#user-guide}
+
+### Using Python {#using-python}
+
+#### Overview {#overview}
+
+Several versions of the Python interpreter are available on Nix, as well as a
+large number of packages. The attribute `python3` refers to the default
+interpreter, which is currently CPython 3.11. The attribute `python` refers to
+CPython 2.7 for backwards-compatibility. It is also possible to refer to
+specific versions, e.g. `python311` refers to CPython 3.11, and `pypy` refers to
+the default PyPy interpreter.
+
+Python is used a lot, and in different ways. This also affects how it is
+packaged. In the case of Python on Nix, an important distinction is made between
+whether the package is considered primarily an application, or whether it should
+be used as a library, i.e., of primary interest are the modules in
+`site-packages` that should be importable.
+
+In the Nixpkgs tree Python applications can be found throughout, depending on
+what they do, and are called from the main package set. Python libraries,
+however, are in separate sets, with one set per interpreter version.
+
+The interpreters have several common attributes. One of these attributes is
+`pkgs`, which is a package set of Python libraries for this specific
+interpreter. E.g., the `toolz` package corresponding to the default interpreter
+is `python3.pkgs.toolz`, and the CPython 3.11 version is `python311.pkgs.toolz`.
+The main package set contains aliases to these package sets, e.g.
+`pythonPackages` refers to `python.pkgs` and `python311Packages` to
+`python311.pkgs`.
+
+#### Installing Python and packages {#installing-python-and-packages}
+
+The Nix and NixOS manuals explain how packages are generally installed. In the
+case of Python and Nix, it is important to make a distinction between whether the
+package is considered an application or a library.
+
+Applications on Nix are typically installed into your user profile imperatively
+using `nix-env -i`, and on NixOS declaratively by adding the package name to
+`environment.systemPackages` in `/etc/nixos/configuration.nix`. Dependencies
+such as libraries are automatically installed and should not be installed
+explicitly.
+
+The same goes for Python applications. Python applications can be installed in
+your profile, and will be wrapped to find their exact library dependencies,
+without impacting other applications or polluting your user environment.
+
+But Python libraries you would like to use for development cannot be installed,
+at least not individually, because they won't be able to find each other
+resulting in import errors. Instead, it is possible to create an environment
+with [`python.buildEnv`](#python.buildenv-function) or [`python.withPackages`](#python.withpackages-function) where the interpreter and other
+executables are wrapped to be able to find each other and all of the modules.
+
+In the following examples we will start by creating a simple, ad-hoc environment
+with a nix-shell that has `numpy` and `toolz` in Python 3.11; then we will create
+a re-usable environment in a single-file Python script; then we will create a
+full Python environment for development with this same environment.
+
+Philosophically, this should be familiar to users who are used to a `venv` style
+of development: individual projects create their own Python environments without
+impacting the global environment or each other.
+
+#### Ad-hoc temporary Python environment with `nix-shell` {#ad-hoc-temporary-python-environment-with-nix-shell}
+
+The simplest way to start playing with the way nix wraps and sets up Python
+environments is with `nix-shell` at the command line. These environments create a
+temporary shell session with a Python and a *precise* list of packages (plus
+their runtime dependencies), with no other Python packages in the Python
+interpreter's scope.
+
+To create a Python 3.11 session with `numpy` and `toolz` available, run:
+
+```sh
+$ nix-shell -p 'python311.withPackages(ps: with ps; [ numpy toolz ])'
+```
+
+By default `nix-shell` will start a `bash` session with this interpreter in our
+`PATH`, so if we then run:
+
+```Python console
+[nix-shell:~/src/nixpkgs]$ python3
+Python 3.11.3 (main, Apr  4 2023, 22:36:41) [GCC 12.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import numpy; import toolz
+```
+
+Note that no other modules are in scope, even if they were imperatively
+installed into our user environment as a dependency of a Python application:
+
+```Python console
+>>> import requests
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+ModuleNotFoundError: No module named 'requests'
+```
+
+We can add as many additional modules onto the `nix-shell` as we need, and we
+will still get 1 wrapped Python interpreter. We can start the interpreter
+directly like so:
+
+```sh
+$ nix-shell -p "python311.withPackages (ps: with ps; [ numpy toolz requests ])" --run python3
+this derivation will be built:
+  /nix/store/r19yf5qgfiakqlhkgjahbg3zg79549n4-python3-3.11.2-env.drv
+building '/nix/store/r19yf5qgfiakqlhkgjahbg3zg79549n4-python3-3.11.2-env.drv'...
+created 273 symlinks in user environment
+Python 3.11.2 (main, Feb  7 2023, 13:52:42) [GCC 12.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import requests
+>>>
+```
+
+Notice that this time it built a new Python environment, which now includes
+`requests`. Building an environment just creates wrapper scripts that expose the
+selected dependencies to the interpreter while re-using the actual modules. This
+means if any other env has installed `requests` or `numpy` in a different
+context, we don't need to rebuild them -- we just rebuild the wrapper script
+that sets up an interpreter pointing to them. This matters much more for "big"
+modules like `pytorch` or `tensorflow`.
+
+Module names usually match their names on [pypi.org](https://pypi.org/), but
+you can use the [Nixpkgs search website](https://nixos.org/nixos/packages.html)
+to find them as well (along with non-python packages).
+
+At this point we can create throwaway experimental Python environments with
+arbitrary dependencies. This is a good way to get a feel for how the Python
+interpreter and dependencies work in Nix and NixOS, but to do some actual
+development, we'll want to make it a bit more persistent.
+
+##### Running Python scripts and using `nix-shell` as shebang {#running-python-scripts-and-using-nix-shell-as-shebang}
+
+Sometimes, we have a script whose header looks like this:
+
+```python
+#!/usr/bin/env python3
+import numpy as np
+a = np.array([1,2])
+b = np.array([3,4])
+print(f"The dot product of {a} and {b} is: {np.dot(a, b)}")
+```
+
+Executing this script requires a `python3` that has `numpy`. Using what we learned
+in the previous section, we could start up a shell and just run it like so:
+
+```ShellSession
+$ nix-shell -p 'python311.withPackages (ps: with ps; [ numpy ])' --run 'python3 foo.py'
+The dot product of [1 2] and [3 4] is: 11
+```
+
+But if we maintain the script ourselves, and if there are more dependencies, it
+may be nice to encode those dependencies in source to make the script re-usable
+without that bit of knowledge. That can be done by using `nix-shell` as a
+[shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)), like so:
+
+```python
+#!/usr/bin/env nix-shell
+#!nix-shell -i python3 -p "python3.withPackages(ps: [ ps.numpy ])"
+import numpy as np
+a = np.array([1,2])
+b = np.array([3,4])
+print(f"The dot product of {a} and {b} is: {np.dot(a, b)}")
+```
+
+Then we execute it, without requiring any environment setup at all!
+
+```sh
+$ ./foo.py
+The dot product of [1 2] and [3 4] is: 11
+```
+
+If the dependencies are not available on the host where `foo.py` is executed, it
+will build or download them from a Nix binary cache prior to starting up,
+provided it is executed on a machine with a multi-user Nix installation.
+
+This provides a way to ship a self-bootstrapping Python script, akin to a
+statically linked binary, where it can be run on any machine (provided nix is
+installed) without having to assume that `numpy` is installed globally on the
+system.
+
+By default it pulls the Nixpkgs checkout from your Nix channel, which is
+convenient because it shares the binary cache with your other package builds,
+but we can make it fully reproducible by pinning the `nixpkgs` import:
+
+```python
+#!/usr/bin/env nix-shell
+#!nix-shell -i python3 -p "python3.withPackages (ps: [ ps.numpy ])"
+#!nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/e51209796c4262bfb8908e3d6d72302fe4e96f5f.tar.gz
+import numpy as np
+a = np.array([1,2])
+b = np.array([3,4])
+print(f"The dot product of {a} and {b} is: {np.dot(a, b)}")
+```
+
+This will execute with the exact same versions of Python 3.10, numpy, and system
+dependencies a year from now as it does today, because it will always use
+exactly git commit `e51209796c4262bfb8908e3d6d72302fe4e96f5f` of Nixpkgs for all
+of the package versions.
+
+This is also a great way to ensure the script executes identically on different
+servers.
+
+##### Load environment from `.nix` expression {#load-environment-from-.nix-expression}
+
+We've now seen how to create an ad-hoc temporary shell session, and how to
+create a single script with Python dependencies, but in the course of normal
+development we're usually working in an entire package repository.
+
+As explained [in the `nix-shell` section](https://nixos.org/manual/nix/stable/command-ref/nix-shell) of the Nix manual, `nix-shell` can also load an expression from a `.nix` file.
+Say we want to have Python 3.11, `numpy` and `toolz`, like before,
+in an environment. We can add a `shell.nix` file describing our dependencies:
+
+```nix
+with import <nixpkgs> {};
+(python311.withPackages (ps: with ps; [
+  numpy
+  toolz
+])).env
+```
+
+And then at the command line, just typing `nix-shell` produces the same
+environment as before. In a normal project, we'll likely have many more
+dependencies; this can provide a way for developers to share the environments
+with each other and with CI builders.
+
+What's happening here?
+
+1. We begin by importing the Nix Packages collection. `import <nixpkgs>`
+   imports the `<nixpkgs>` function, `{}` calls it, and the `with` statement
+   brings all attributes of `nixpkgs` into the local scope. These attributes form
+   the main package set.
+2. Then we create a Python 3.11 environment with the [`withPackages`](#python.withpackages-function) function, as before.
+3. The [`withPackages`](#python.withpackages-function) function expects us to provide a function as an argument
+   that takes the set of all Python packages and returns a list of packages to
+   include in the environment. Here, we select the packages `numpy` and `toolz`
+   from the package set.
+
+To combine this with `mkShell` you can:
+
+```nix
+with import <nixpkgs> {};
+let
+  pythonEnv = python311.withPackages (ps: [
+    ps.numpy
+    ps.toolz
+  ]);
+in mkShell {
+  packages = [
+    pythonEnv
+
+    black
+    mypy
+
+    libffi
+    openssl
+  ];
+}
+```
+
+This will create a unified environment that has not just our Python interpreter
+and its Python dependencies, but also tools like `black` or `mypy` and libraries
+like `libffi` and `openssl` in scope. This is generic and can span any number of
+tools or languages across the Nixpkgs ecosystem.
+
+##### Installing environments globally on the system {#installing-environments-globally-on-the-system}
+
+Up to now, we've been creating environments scoped to an ad-hoc shell session,
+or a single script, or a single project. This is generally advisable, as it
+avoids pollution across contexts.
+
+However, sometimes we know we will often want a Python with some basic packages,
+and want this available without having to enter into a shell or build context.
+This can be useful to have things like vim/emacs editors and plugins or shell
+tools "just work" without having to set them up, or when running other software
+that expects packages to be installed globally.
+
+To create your own custom environment, create a file in `~/.config/nixpkgs/overlays/`
+that looks like this:
+
+```nix
+# ~/.config/nixpkgs/overlays/myEnv.nix
+self: super: {
+  myEnv = super.buildEnv {
+    name = "myEnv";
+    paths = [
+      # A Python 3 interpreter with some packages
+      (self.python3.withPackages (
+        ps: with ps; [
+          pyflakes
+          pytest
+          black
+        ]
+      ))
+
+      # Some other packages we'd like as part of this env
+      self.mypy
+      self.black
+      self.ripgrep
+      self.tmux
+    ];
+  };
+}
+```
+
+You can then build and install this to your profile with:
+
+```sh
+nix-env -iA myEnv
+```
+
+One limitation of this is that you can only have one Python environment installed
+globally, since the environments conflict over which `python` comes first in your `PATH`.
+
+If you get a conflict or prefer to keep the setup clean, you can have `nix-env`
+atomically *uninstall* all other imperatively installed packages and replace
+your profile with just `myEnv` by using the `--replace` flag.
+
+##### Environment defined in `/etc/nixos/configuration.nix` {#environment-defined-in-etcnixosconfiguration.nix}
+
+For the sake of completeness, here's how to install the environment system-wide
+on NixOS.
+
+```nix
+{ # ...
+
+  environment.systemPackages = with pkgs; [
+    (python310.withPackages(ps: with ps; [ numpy toolz ]))
+  ];
+}
+```
+
+### Developing with Python {#developing-with-python}
+
+Above, we were mostly just focused on use cases and what to do to get started
+creating working Python environments in nix.
+
+Now that you know the basics to be up and running, it is time to take a step
+back and take a deeper look at how Python packages are packaged on Nix. Then,
+we will look at how you can use development mode with your code.
+
+#### Python library packages in Nixpkgs {#python-library-packages-in-nixpkgs}
+
+With Nix all packages are built by functions. The main function in Nix for
+building Python libraries is [`buildPythonPackage`](#buildpythonpackage-function). Let's see how we can build the
+`toolz` package.
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+, setuptools
+, wheel
+}:
+
+buildPythonPackage rec {
+  pname = "toolz";
+  version = "0.10.0";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
+  };
+
+  build-system = [
+    setuptools
+    wheel
+  ];
+
+  # has no tests
+  doCheck = false;
+
+  pythonImportsCheck = [
+    "toolz.itertoolz"
+    "toolz.functoolz"
+    "toolz.dicttoolz"
+  ];
+
+  meta = {
+    changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
+    homepage = "https://github.com/pytoolz/toolz";
+    description = "List processing tools and functional utilities";
+    license = lib.licenses.bsd3;
+    maintainers = with lib.maintainers; [ fridh ];
+  };
+}
+```
+
+What happens here? The function [`buildPythonPackage`](#buildpythonpackage-function) is called and accepts a set
+as its argument. In this case the set is a recursive set, `rec`. One of the
+arguments is the name of the package, which consists of a basename (generally
+following the name on PyPI) and a version. Another argument, `src`, specifies the
+source, which in this case is fetched from PyPI using the helper function
+`fetchPypi`. The argument `doCheck` is used to set whether tests should be run
+when building the package. Since there are no tests, we rely on [`pythonImportsCheck`](#using-pythonimportscheck)
+to test whether the package can be imported. Furthermore, we specify some meta
+information. The output of the function is a derivation.
+
+An expression for `toolz` can be found in the Nixpkgs repository. As explained
+in the introduction of this Python section, a derivation of `toolz` is available
+for each interpreter version, e.g. `python311.pkgs.toolz` refers to the `toolz`
+derivation corresponding to the CPython 3.11 interpreter.
+
+The above example works when you're directly working on
+`pkgs/top-level/python-packages.nix` in the Nixpkgs repository. Often though,
+you will want to test a Nix expression outside of the Nixpkgs tree.
+
+The following expression creates a derivation for the `toolz` package,
+and adds it along with a `numpy` package to a Python environment.
+
+```nix
+with import <nixpkgs> {};
+
+( let
+    my_toolz = python311.pkgs.buildPythonPackage rec {
+      pname = "toolz";
+      version = "0.10.0";
+      pyproject = true;
+
+      src = fetchPypi {
+        inherit pname version;
+        hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
+      };
+
+      build-system = [
+        python311.pkgs.setuptools
+        python311.pkgs.wheel
+      ];
+
+      # has no tests
+      doCheck = false;
+
+      meta = {
+        homepage = "https://github.com/pytoolz/toolz/";
+        description = "List processing tools and functional utilities";
+        # [...]
+      };
+    };
+
+  in python311.withPackages (ps: with ps; [
+    numpy
+    my_toolz
+  ])
+).env
+```
+
+Executing `nix-shell` will result in an environment in which you can use
+Python 3.11 and the `toolz` package. As you can see we had to explicitly mention
+for which Python version we want to build a package.
+
+So, what did we do here? Well, we took the Nix expression that we used earlier
+to build a Python environment, and said that we wanted to include our own
+version of `toolz`, named `my_toolz`. To introduce our own package in the scope
+of [`withPackages`](#python.withpackages-function) we used a `let` expression. You can see that we used
+`ps.numpy` to select numpy from the nixpkgs package set (`ps`). We did not take
+`toolz` from the Nixpkgs package set this time, but instead took our own version
+that we introduced with the `let` expression.
+
+#### Handling dependencies {#handling-dependencies}
+
+Our example, `toolz`, does not have any dependencies on other Python packages or system libraries.
+[`buildPythonPackage`](#buildpythonpackage-function) uses the following arguments in the following circumstances:
+
+- `dependencies` - For Python runtime dependencies.
+- `build-system` - For Python build-time requirements.
+- [`buildInputs`](#var-stdenv-buildInputs) - For non-Python build-time requirements.
+- [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs) - For test dependencies.
+
+Dependencies can belong to multiple arguments, for example if something is both a build-time requirement and a runtime dependency.
+
+The following example shows which arguments are given to [`buildPythonPackage`](#buildpythonpackage-function) in
+order to build [`datashape`](https://github.com/blaze/datashape).
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+
+# build dependencies
+, setuptools, wheel
+
+# dependencies
+, numpy, multipledispatch, python-dateutil
+
+# tests
+, pytest
+}:
+
+buildPythonPackage rec {
+  pname = "datashape";
+  version = "0.4.7";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-FLLvdm1MllKrgTGC6Gb0k0deZeVYvtCCLji/B7uhong=";
+  };
+
+  build-system = [
+    setuptools
+    wheel
+  ];
+
+  dependencies = [
+    multipledispatch
+    numpy
+    python-dateutil
+  ];
+
+  nativeCheckInputs = [
+    pytest
+  ];
+
+  meta = {
+    changelog = "https://github.com/blaze/datashape/releases/tag/${version}";
+    homepage = "https://github.com/ContinuumIO/datashape";
+    description = "A data description language";
+    license = lib.licenses.bsd2;
+    maintainers = with lib.maintainers; [ fridh ];
+  };
+}
+```
+
+We can see several runtime dependencies, `numpy`, `multipledispatch`, and
+`python-dateutil`. Furthermore, we have [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs) with `pytest`.
+`pytest` is a test runner and is only used during the [`checkPhase`](#ssec-check-phase) and is
+therefore not added to `dependencies`.
+
+In the previous case we only had dependencies on other Python packages to consider.
+Occasionally you also have system libraries to consider. E.g., `lxml` provides
+Python bindings to `libxml2` and `libxslt`. These libraries are only required
+when building the bindings and are therefore added as [`buildInputs`](#var-stdenv-buildInputs).
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+, setuptools
+, wheel
+, libxml2
+, libxslt
+}:
+
+buildPythonPackage rec {
+  pname = "lxml";
+  version = "3.4.4";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-s9NiusRxFydHzaNRMjjxFcvWxfi45jGb9ql6eJJyQJk=";
+  };
+
+  build-system = [
+    setuptools
+    wheel
+  ];
+
+  buildInputs = [
+    libxml2
+    libxslt
+  ];
+
+  meta = {
+    changelog = "https://github.com/lxml/lxml/releases/tag/lxml-${version}";
+    description = "Pythonic binding for the libxml2 and libxslt libraries";
+    homepage = "https://lxml.de";
+    license = lib.licenses.bsd3;
+    maintainers = with lib.maintainers; [ sjourdois ];
+  };
+}
+```
+
+In this example `lxml` and Nix are able to work out exactly where the relevant
+files of the dependencies are. This is not always the case.
+
+The example below shows bindings to The Fastest Fourier Transform in the West,
+commonly known as FFTW. On Nix we have separate packages of FFTW for the
+different types of floats (`"single"`, `"double"`, `"long-double"`). The
+bindings need all three types, and therefore we add all three as [`buildInputs`](#var-stdenv-buildInputs).
+The bindings don't expect to find each of them in a different folder, and
+therefore we have to set `LDFLAGS` and `CFLAGS`.
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+
+# build dependencies
+, setuptools
+, wheel
+
+# dependencies
+, fftw
+, fftwFloat
+, fftwLongDouble
+, numpy
+, scipy
+}:
+
+buildPythonPackage rec {
+  pname = "pyFFTW";
+  version = "0.9.2";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-9ru2r6kwhUCaskiFoaPNuJCfCVoUL01J40byvRt4kHQ=";
+  };
+
+  build-system = [
+    setuptools
+    wheel
+  ];
+
+  buildInputs = [
+    fftw
+    fftwFloat
+    fftwLongDouble
+  ];
+
+  dependencies = [
+    numpy
+    scipy
+  ];
+
+  preConfigure = ''
+    export LDFLAGS="-L${fftw.dev}/lib -L${fftwFloat.out}/lib -L${fftwLongDouble.out}/lib"
+    export CFLAGS="-I${fftw.dev}/include -I${fftwFloat.dev}/include -I${fftwLongDouble.dev}/include"
+  '';
+
+  # Tests cannot import pyfftw. pyfftw works fine though.
+  doCheck = false;
+
+  meta = {
+    changelog = "https://github.com/pyFFTW/pyFFTW/releases/tag/v${version}";
+    description = "A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms";
+    homepage = "http://hgomersall.github.com/pyFFTW";
+    license = with lib.licenses; [ bsd2 bsd3 ];
+    maintainers = with lib.maintainers; [ fridh ];
+  };
+}
+```
+
+Note also the line [`doCheck = false;`](#var-stdenv-doCheck): we explicitly disabled running the test suite.
+
+#### Testing Python Packages {#testing-python-packages}
+
+It is highly encouraged to have testing as part of the package build. This
+helps to avoid situations where the package was able to build and install,
+but is not usable at runtime. By default, all packages use the `test`
+command provided by `setup.py` (i.e. `python setup.py test`). However,
+this is [deprecated](https://github.com/pypa/setuptools/pull/1878),
+and your package should provide its own [`checkPhase`](#ssec-check-phase).
+
+::: {.note}
+The [`checkPhase`](#ssec-check-phase) for Python maps to the `installCheckPhase` of a
+normal derivation. This is because many Python packages do not behave well
+when tested against the not-yet-installed version of the package. Version info and natively
+compiled extensions generally only exist in the install directory, and
+thus can cause issues when a test suite asserts on that behavior.
+:::
+
+::: {.note}
+Tests should only be disabled if they don't work with Nix
+(e.g. external dependencies, network access, flaky tests); otherwise,
+as many tests as possible should be enabled. Failing tests can still be
+a good indication that the package is not in a valid state.
+:::
+
+#### Using pytest {#using-pytest}
+
+Pytest is the most common test runner for python repositories. A trivial
+test run would be:
+
+```nix
+{
+  nativeCheckInputs = [ pytest ];
+  checkPhase = ''
+    runHook preCheck
+
+    pytest
+
+    runHook postCheck
+  '';
+}
+```
+
+However, many repositories' test suites do not translate well to nix's build
+sandbox, and will generally need many tests to be disabled.
+
+To filter tests using pytest, one can do the following:
+
+```nix
+{
+  nativeCheckInputs = [ pytest ];
+  # avoid tests which need additional data or touch network
+  checkPhase = ''
+    runHook preCheck
+
+    pytest tests/ --ignore=tests/integration -k 'not download and not update' --ignore=tests/test_failing.py
+
+    runHook postCheck
+  '';
+}
+```
+
+`--ignore` tells pytest to exclude that file or directory from being
+collected as part of a test run. This is useful if a file uses a package
+which is not available in nixpkgs; skipping that test file is much
+easier than having to create a new package.
+
+`-k` is used to define a predicate for test names. In this example, we are
+filtering out tests which contain `download` or `update` in their test case name.
+Only one `-k` argument is allowed, and thus a long predicate should be concatenated
+with “\\” and wrapped to the next line.
+
+::: {.note}
+In pytest==6.0.1, the use of “\\” to continue a line (e.g. `-k 'not download \'`) was
+removed; in this case, it's recommended to use `pytestCheckHook`.
+:::
+
+#### Using pytestCheckHook {#using-pytestcheckhook}
+
+`pytestCheckHook` is a convenient hook which will substitute the setuptools
+`test` command for a [`checkPhase`](#ssec-check-phase) which runs `pytest`. This is also beneficial
+when a package may need many items disabled to run the test suite.
+
+Using the example above, the analogous `pytestCheckHook` usage would be:
+
+```nix
+{
+  nativeCheckInputs = [
+    pytestCheckHook
+  ];
+
+  # requires additional data
+  pytestFlagsArray = [
+    "tests/"
+    "--ignore=tests/integration"
+  ];
+
+  disabledTests = [
+    # touches network
+    "download"
+    "update"
+  ];
+
+  disabledTestPaths = [
+    "tests/test_failing.py"
+  ];
+}
+```
+
+This is especially useful when tests need to be conditionally disabled,
+for example:
+
+```nix
+{
+  disabledTests = [
+    # touches network
+    "download"
+    "update"
+  ] ++ lib.optionals (pythonAtLeast "3.8") [
+    # broken due to python3.8 async changes
+    "async"
+  ] ++ lib.optionals stdenv.isDarwin [
+    # can fail when building with other packages
+    "socket"
+  ];
+}
+```
+
+Trying to concatenate the related strings to disable tests in a regular
+[`checkPhase`](#ssec-check-phase) would be much harder to read. This also enables us to comment on
+why specific tests are disabled.
+
+#### Using pythonImportsCheck {#using-pythonimportscheck}
+
+Although unit tests are highly preferred to validate correctness of a package, not
+all packages have test suites that can be run easily, and some have none at all.
+To help ensure the package still works, [`pythonImportsCheck`](#using-pythonimportscheck) can attempt to import
+the listed modules.
+
+```nix
+{
+  pythonImportsCheck = [
+    "requests"
+    "urllib"
+  ];
+}
+```
+
+roughly translates to:
+
+```nix
+{
+  postCheck = ''
+    PYTHONPATH=$out/${python.sitePackages}:$PYTHONPATH
+    python -c "import requests; import urllib"
+  '';
+}
+```
+
+However, this is done in its own phase, and not dependent on whether [`doCheck = true;`](#var-stdenv-doCheck).
+
+This can also be useful in verifying that the package doesn't assume commonly
+present packages (e.g. `setuptools`).
+
+#### Using pythonRelaxDepsHook {#using-pythonrelaxdepshook}
+
+It is common for upstream to specify a range of versions for its package
+dependencies. This makes sense, since it ensures that the package will be built
+with a subset of packages that is well tested. However, this commonly causes
+issues when packaging in Nixpkgs, because the dependencies that this package
+may need are too new or old for the package to build correctly. We also cannot
+package multiple versions of the same package since this may cause conflicts
+in `PYTHONPATH`.
+
+One way to side step this issue is to relax the dependencies. This can be done
+by either removing the package version range or by removing the package
+declaration entirely. This can be done using the `pythonRelaxDepsHook` hook. For
+example, given the following `requirements.txt` file:
+
+```
+pkg1<1.0
+pkg2
+pkg3>=1.0,<=2.0
+```
+
+we can do:
+
+```nix
+{
+  nativeBuildInputs = [
+    pythonRelaxDepsHook
+  ];
+  pythonRelaxDeps = [
+    "pkg1"
+    "pkg3"
+  ];
+  pythonRemoveDeps = [
+    "pkg2"
+  ];
+}
+```
+
+which would result in the following `requirements.txt` file:
+
+```
+pkg1
+pkg3
+```
+
+Another option is to pass `true`, which will relax/remove all dependencies, for
+example:
+
+```nix
+{
+  nativeBuildInputs = [ pythonRelaxDepsHook ];
+  pythonRelaxDeps = true;
+}
+```
+
+which would result in the following `requirements.txt` file:
+
+```
+pkg1
+pkg2
+pkg3
+```
+
+In general you should always use `pythonRelaxDeps`, because `pythonRemoveDeps`
+will convert build errors into runtime errors. However `pythonRemoveDeps` may
+still be useful in exceptional cases, and also to remove dependencies wrongly
+declared by upstream (for example, declaring `black` as a runtime dependency
+instead of a dev dependency).
+
+Keep in mind that while the examples above are done with `requirements.txt`,
+`pythonRelaxDepsHook` works by modifying the resulting wheel file, so it should
+work with any of the [existing hooks](#setup-hooks).
+
+#### Using unittestCheckHook {#using-unittestcheckhook}
+
+`unittestCheckHook` is a hook which will substitute the setuptools `test` command for a [`checkPhase`](#ssec-check-phase) which runs `python -m unittest discover`:
+
+```nix
+{
+  nativeCheckInputs = [
+    unittestCheckHook
+  ];
+
+  unittestFlagsArray = [
+    "-s" "tests" "-v"
+  ];
+}
+```
+
+#### Using sphinxHook {#using-sphinxhook}
+
+The `sphinxHook` is a helpful tool to build documentation and manpages
+using the popular Sphinx documentation generator.
+It is set up to automatically find common documentation source paths and
+render them using the default `html` style.
+
+```nix
+{
+  outputs = [
+    "out"
+    "doc"
+  ];
+
+  nativeBuildInputs = [
+    sphinxHook
+  ];
+}
+```
+
+The hook will automatically build and install the artifact into the
+`doc` output, if it exists. It also provides an automatic diversion
+for the artifacts of the `man` builder into the `man` output.
+
+```nix
+{
+  outputs = [
+    "out"
+    "doc"
+    "man"
+  ];
+
+  # Use multiple builders
+  sphinxBuilders = [
+    "singlehtml"
+    "man"
+  ];
+}
+```
+
+Overwrite `sphinxRoot` when the hook is unable to find your
+documentation source root.
+
+```nix
+{
+  # Configure sphinxRoot for uncommon paths
+  sphinxRoot = "weird/docs/path";
+}
+```
+
+The hook is also available to packages outside the python ecosystem by
+referencing it using `sphinxHook` from top-level.
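+
+A minimal sketch of such a non-Python package (the package name and source are hypothetical):
+
+```nix
+{ stdenv, sphinxHook }:
+
+stdenv.mkDerivation {
+  pname = "some-cli-tool";
+  version = "1.0";
+  src = ./.;
+
+  outputs = [
+    "out"
+    "doc"
+  ];
+
+  # sphinxHook is taken from the top-level package set here,
+  # not from a Python package set
+  nativeBuildInputs = [
+    sphinxHook
+  ];
+}
+```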
+
+### Develop local package {#develop-local-package}
+
+As a Python developer you're likely aware of [development mode](http://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode)
+(`python setup.py develop`); instead of installing the package this command
+creates a special link to the project code. That way, you can run updated code
+without having to reinstall after each and every change you make. Development
+mode is also available in Nix. Let's see how you can use it.
+
+In the previous Nix expression the source was fetched from a URL. We can also
+refer to a local source instead, using `src = ./path/to/source/tree;`.
+
+If we create a `shell.nix` file which calls [`buildPythonPackage`](#buildpythonpackage-function), and if `src`
+is a local source, and if the local source has a `setup.py`, then development
+mode is activated.
+
+In the following example, we create a simple environment that has a Python 3.11
+version of our package in it, as well as its dependencies and other packages we
+like to have in the environment, all specified with `dependencies`.
+
+```nix
+with import <nixpkgs> {};
+with python311Packages;
+
+buildPythonPackage rec {
+  name = "mypackage";
+  src = ./path/to/package/source;
+  dependencies = [
+    pytest
+    numpy
+  ];
+  propagatedBuildInputs = [
+    pkgs.libsndfile
+  ];
+}
+```
+
+It is important to note that due to how development mode is implemented on Nix
+it is not possible to have multiple packages simultaneously in development mode.
+
+### Organising your packages {#organising-your-packages}
+
+So far we discussed how you can use Python on Nix, and how you can develop with
+it. We've looked at how you write expressions to package Python packages, and we
+looked at how you can create environments in which specified packages are
+available.
+
+At some point you'll likely have multiple packages which you would
+like to be able to use in different projects. In order to minimise unnecessary
+duplication we now look at how you can maintain a repository with your
+own packages. The important functions here are `import` and `callPackage`.
+
+### Including a derivation using `callPackage` {#including-a-derivation-using-callpackage}
+
+Earlier we created a Python environment using [`withPackages`](#python.withpackages-function), and included the
+`toolz` package via a `let` expression.
+Let's split the package definition from the environment definition.
+
+We first create a function that builds `toolz` in `~/path/to/toolz/release.nix`
+
+```nix
+{ lib
+, buildPythonPackage
+, fetchPypi
+, setuptools
+, wheel
+}:
+
+buildPythonPackage rec {
+  pname = "toolz";
+  version = "0.10.0";
+  pyproject = true;
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
+  };
+
+  build-system = [
+    setuptools
+    wheel
+  ];
+
+  meta = {
+    changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
+    homepage = "https://github.com/pytoolz/toolz/";
+    description = "List processing tools and functional utilities";
+    license = lib.licenses.bsd3;
+    maintainers = with lib.maintainers; [ fridh ];
+  };
+}
+```
+
+It takes an argument [`buildPythonPackage`](#buildpythonpackage-function). We now call this function using
+`callPackage` in the definition of our environment
+
+```nix
+with import <nixpkgs> {};
+
+( let
+    toolz = callPackage /path/to/toolz/release.nix {
+      buildPythonPackage = python310Packages.buildPythonPackage;
+    };
+  in python310.withPackages (ps: [
+    ps.numpy
+    toolz
+  ])
+).env
+```
+
+Important to remember is that the Python version for which the package is made
+depends on the `python` derivation that is passed to [`buildPythonPackage`](#buildpythonpackage-function). Nix
+tries to automatically pass arguments when possible, which is why generally you
+don't explicitly define which `python` derivation should be used. In the above
+example we use [`buildPythonPackage`](#buildpythonpackage-function) that is part of the set `python310Packages`,
+and in this case the `python310` interpreter is automatically used.
+
+## FAQ {#faq}
+
+### How to solve circular dependencies? {#how-to-solve-circular-dependencies}
+
+Consider the packages `A` and `B` that depend on each other. When packaging `B`,
+a solution is to override package `A` not to depend on `B` as an input. The same
+should also be done when packaging `A`.
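+
+A minimal sketch of that idea, assuming hypothetical packages `A` and `B` where `A` takes `B` as a function argument:
+
+```nix
+{ buildPythonPackage, A }:
+
+buildPythonPackage rec {
+  pname = "B";
+  version = "1.0";
+  pyproject = true;
+
+  # ...
+
+  dependencies = [
+    # use a variant of A built without B to break the cycle
+    (A.override { B = null; })
+  ];
+}
+```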
+
+### How to override a Python package? {#how-to-override-a-python-package}
+
+We can override the interpreter and pass `packageOverrides`. In the following
+example we rename the `pandas` package and build it.
+
+```nix
+with import <nixpkgs> {};
+
+(let
+  python = let
+    packageOverrides = self: super: {
+      pandas = super.pandas.overridePythonAttrs(old: {name="foo";});
+    };
+  in pkgs.python310.override {
+    inherit packageOverrides;
+  };
+
+in python.withPackages (ps: [
+  ps.pandas
+])).env
+```
+
+Using `nix-build` on this expression will build an environment that contains the
+package `pandas` but with the new name `foo`.
+
+All packages in the package set will use the renamed package. A typical use case
+is to switch to another version of a certain package. For example, in the
+Nixpkgs repository we have multiple versions of `django` and `scipy`. In the
+following example we use a different version of `scipy` and create an
+environment that uses it. All packages in the Python package set will now use
+the updated `scipy` version.
+
+```nix
+with import <nixpkgs> {};
+
+( let
+    packageOverrides = self: super: {
+      scipy = super.scipy_0_17;
+    };
+  in (pkgs.python310.override {
+    inherit packageOverrides;
+  }).withPackages (ps: [
+    ps.blaze
+  ])
+).env
+```
+
+The requested package `blaze` depends on `pandas` which itself depends on `scipy`.
+
+If you want the whole of Nixpkgs to use your modifications, then you can use
+`overlays` as explained in this manual. In the following example we build
+`inkscape` using a different version of `numpy`.
+
+```nix
+let
+  pkgs = import <nixpkgs> {};
+  newpkgs = import pkgs.path { overlays = [ (self: super: {
+    python310 = let
+      packageOverrides = python-self: python-super: {
+        numpy = python-super.numpy_1_18;
+      };
+    in super.python310.override {inherit packageOverrides;};
+  } ) ]; };
+in newpkgs.inkscape
+```
+
+### `python setup.py bdist_wheel` cannot create .whl {#python-setup.py-bdist_wheel-cannot-create-.whl}
+
+Executing `python setup.py bdist_wheel` in a `nix-shell` fails with
+
+```
+ValueError: ZIP does not support timestamps before 1980
+```
+
+This is because files from the Nix store (which have a timestamp of the UNIX
+epoch of January 1, 1970) are included in the .ZIP, but .ZIP archives follow the
+DOS convention of counting timestamps from 1980.
+
+The command `bdist_wheel` reads the `SOURCE_DATE_EPOCH` environment variable,
+which `nix-shell` sets to 1. Unsetting this variable or giving it a value
+corresponding to 1980 or later enables building wheels.
+
+Use 1980 as timestamp:
+
+```shell
+nix-shell --run "SOURCE_DATE_EPOCH=315532800 python3 setup.py bdist_wheel"
+```
+
+or the current time:
+
+```shell
+nix-shell --run "SOURCE_DATE_EPOCH=$(date +%s) python3 setup.py bdist_wheel"
+```
+
+or unset `SOURCE_DATE_EPOCH`:
+
+```shell
+nix-shell --run "unset SOURCE_DATE_EPOCH; python3 setup.py bdist_wheel"
+```
+
+### `install_data` / `data_files` problems {#install_data-data_files-problems}
+
+If you get the following error:
+
+```
+could not create '/nix/store/6l1bvljpy8gazlsw2aw9skwwp4pmvyxw-python-2.7.8/etc':
+Permission denied
+```
+
+This is a [known bug](https://github.com/pypa/setuptools/issues/130) in
+`setuptools`. Setuptools `install_data` does not respect `--prefix`. An example
+of such a package using this feature is `pkgs/tools/X11/xpra/default.nix`.
+
+As a workaround, install it in an extra `preInstall` step:
+
+```shell
+${python.pythonOnBuildForHost.interpreter} setup.py install_data --install-dir=$out --root=$out
+sed -i '/ = data\_files/d' setup.py
+```
+
+### Rationale of non-existent global site-packages {#rationale-of-non-existent-global-site-packages}
+
+On most operating systems a global `site-packages` is maintained. This however
+becomes problematic if you want to run multiple Python versions or have multiple
+versions of certain libraries for your projects. Generally, you would solve such
+issues by creating virtual environments using `virtualenv`.
+
+On Nix each package has an isolated dependency tree which, in the case of
+Python, guarantees the right versions of the interpreter and libraries or
+packages are available. There is therefore no need to maintain a global `site-packages`.
+
+If you want to create a Python environment for development, then the recommended
+method is to use `nix-shell`, either with or without the [`python.buildEnv`](#python.buildenv-function)
+function.
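+
+For example, a minimal `shell.nix` sketch using [`python.buildEnv`](#python.buildenv-function) (the package choices are illustrative):
+
+```nix
+with import <nixpkgs> {};
+
+(python3.buildEnv.override {
+  extraLibs = with python3.pkgs; [
+    numpy
+    requests
+  ];
+}).env
+```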
+
+### How to consume Python modules using pip in a virtual environment like I am used to on other Operating Systems? {#how-to-consume-python-modules-using-pip-in-a-virtual-environment-like-i-am-used-to-on-other-operating-systems}
+
+While this approach is not very idiomatic from Nix perspective, it can still be
+useful when dealing with pre-existing projects or in situations where it's not
+feasible or desired to write derivations for all required dependencies.
+
+This is an example of a `default.nix` for a `nix-shell`, which allows you to consume
+a virtual environment created by `venv` and to install Python modules through
+`pip` the traditional way.
+
+Create this `default.nix` file, together with a `requirements.txt` and
+execute `nix-shell`.
+
+```nix
+with import <nixpkgs> { };
+
+let
+  pythonPackages = python3Packages;
+in pkgs.mkShell rec {
+  name = "impurePythonEnv";
+  venvDir = "./.venv";
+  buildInputs = [
+    # A Python interpreter including the 'venv' module is required to bootstrap
+    # the environment.
+    pythonPackages.python
+
+    # This executes some shell code to initialize a venv in $venvDir before
+    # dropping into the shell
+    pythonPackages.venvShellHook
+
+    # Those are dependencies that we would like to use from nixpkgs, which will
+    # add them to PYTHONPATH and thus make them accessible from within the venv.
+    pythonPackages.numpy
+    pythonPackages.requests
+
+    # In this particular example, in order to compile any binary extensions they may
+    # require, the Python modules listed in the hypothetical requirements.txt need
+    # the following packages to be installed locally:
+    taglib
+    openssl
+    git
+    libxml2
+    libxslt
+    libzip
+    zlib
+  ];
+
+  # Run this command, only after creating the virtual environment
+  postVenvCreation = ''
+    unset SOURCE_DATE_EPOCH
+    pip install -r requirements.txt
+  '';
+
+  # Now we can execute any commands within the virtual environment.
+  # This is optional and can be left out to run pip manually.
+  postShellHook = ''
+    # allow pip to install wheels
+    unset SOURCE_DATE_EPOCH
+  '';
+
+}
+```
+
+In case the supplied venvShellHook is insufficient, or when Python 2 support is
+needed, you can define your own shell hook and adapt to your needs like in the
+following example:
+
+```nix
+with import <nixpkgs> { };
+
+let
+  venvDir = "./.venv";
+  pythonPackages = python3Packages;
+in pkgs.mkShell rec {
+  name = "impurePythonEnv";
+  buildInputs = [
+    pythonPackages.python
+    # Needed when using python 2.7
+    # pythonPackages.virtualenv
+    # ...
+  ];
+
+  # This is very close to how venvShellHook is implemented, but
+  # adapted to use 'virtualenv'
+  shellHook = ''
+    SOURCE_DATE_EPOCH=$(date +%s)
+
+    if [ -d "${venvDir}" ]; then
+      echo "Skipping venv creation, '${venvDir}' already exists"
+    else
+      echo "Creating new venv environment in path: '${venvDir}'"
+      # Note that the module venv was only introduced in python 3, so for 2.7
+      # this needs to be replaced with a call to virtualenv
+      ${pythonPackages.python.interpreter} -m venv "${venvDir}"
+    fi
+
+    # Under some circumstances it might be necessary to add your virtual
+    # environment to PYTHONPATH, which you can do here too;
+    # PYTHONPATH=$PWD/${venvDir}/${pythonPackages.python.sitePackages}/:$PYTHONPATH
+
+    source "${venvDir}/bin/activate"
+
+    # As in the previous example, this is optional.
+    pip install -r requirements.txt
+  '';
+}
+```
+
+Note that the `pip install` is an imperative action. So every time `nix-shell`
+is executed it will attempt to download the Python modules listed in
+`requirements.txt`. However, these will be cached locally within the `virtualenv`
+folder and not downloaded again.
+
+### How to override a Python package from `configuration.nix`? {#how-to-override-a-python-package-from-configuration.nix}
+
+If you need to change a package's attribute(s) from `configuration.nix` you could do:
+
+```nix
+{
+  nixpkgs.config.packageOverrides = super: {
+    python3 = super.python3.override {
+      packageOverrides = python-self: python-super: {
+        twisted = python-super.twisted.overridePythonAttrs (oldAttrs: {
+          src = super.fetchPypi {
+            pname = "Twisted";
+            version = "19.10.0";
+            hash = "sha256-c5S6fycq5yKnTz2Wnc9Zm8TvCTvDkgOHSKSQ8XJKUV0=";
+            extension = "tar.bz2";
+          };
+        });
+      };
+    };
+  };
+}
+```
+
+`python3Packages.twisted` is now globally overridden.
+All packages and also all NixOS services that reference `twisted`
+(such as `services.buildbot-worker`) now use the new definition.
+Note that `python-super` refers to the old package set and `python-self`
+to the new, overridden version.
+
+To modify only a Python package set instead of a whole Python derivation, use
+this snippet:
+
+```nix
+{
+  myPythonPackages = python3Packages.override {
+    overrides = self: super: {
+      twisted = <...>;
+    };
+  };
+}
+```
+
+### How to override a Python package using overlays? {#how-to-override-a-python-package-using-overlays}
+
+Use the following overlay template:
+
+```nix
+self: super: {
+  python = super.python.override {
+    packageOverrides = python-self: python-super: {
+      twisted = python-super.twisted.overrideAttrs (oldAttrs: {
+        src = super.fetchPypi {
+          pname = "Twisted";
+          version = "19.10.0";
+          hash = "sha256-c5S6fycq5yKnTz2Wnc9Zm8TvCTvDkgOHSKSQ8XJKUV0=";
+          extension = "tar.bz2";
+        };
+      });
+    };
+  };
+}
+```
+
+### How to override a Python package for all Python versions using extensions? {#how-to-override-a-python-package-for-all-python-versions-using-extensions}
+
+The following overlay overrides the call to [`buildPythonPackage`](#buildpythonpackage-function) for the
+`foo` package for all interpreters by appending a Python extension to the
+`pythonPackagesExtensions` list of extensions.
+
+```nix
+final: prev: {
+  pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [
+    (
+      python-final: python-prev: {
+        foo = python-prev.foo.overridePythonAttrs (oldAttrs: {
+          # ...
+        });
+      }
+    )
+  ];
+}
+```
+
+### How to use Intel’s MKL with numpy and scipy? {#how-to-use-intels-mkl-with-numpy-and-scipy}
+
+MKL can be configured using an overlay. See the section "[Using overlays to
+configure alternatives](#sec-overlays-alternatives-blas-lapack)".
+
+### What inputs do `setup_requires`, `install_requires` and `tests_require` map to? {#what-inputs-do-setup_requires-install_requires-and-tests_require-map-to}
+
+In a `setup.py` or `setup.cfg` it is common to declare dependencies:
+
+* `setup_requires` corresponds to `build-system`
+* `install_requires` corresponds to `dependencies`
+* `tests_require` corresponds to [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs), as shown in the sketch below.
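+
+For instance, a hypothetical `setup.py` declaring `setup_requires=["setuptools-scm"]`, `install_requires=["requests"]` and `tests_require=["pytest"]` would roughly map to:
+
+```nix
+{
+  build-system = [
+    setuptools
+    setuptools-scm
+  ];
+
+  dependencies = [
+    requests
+  ];
+
+  nativeCheckInputs = [
+    pytest
+  ];
+}
+```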
+
+### How to enable interpreter optimizations? {#optimizations}
+
+The Python interpreters are by default not built with optimizations enabled, because
+the builds are in that case not reproducible. To enable optimizations, override the
+interpreter of interest, e.g. using
+
+```nix
+let
+  pkgs = import ./. {};
+  mypython = pkgs.python3.override {
+    enableOptimizations = true;
+    reproducibleBuild = false;
+    self = mypython;
+  };
+in mypython
+```
+
+### How to add optional dependencies? {#python-optional-dependencies}
+
+Some packages define optional dependencies for additional features. With
+`setuptools` this is called `extras_require` and `flit` calls it
+`extras-require`, while PEP 621 calls these `optional-dependencies`.
+
+```nix
+{
+  optional-dependencies = {
+    complete = [ distributed ];
+  };
+}
+```
+
+The package requiring the extra then adds the list to its `dependencies`:
+
+```nix
+{
+  dependencies = [
+    # ...
+  ] ++ dask.optional-dependencies.complete;
+}
+```
+
+This method is using `passthru`, meaning that changing `optional-dependencies` of a package won't cause it to rebuild.
+
+Note this method is preferred over adding parameters to builders, as that can
+result in packages depending on different variants and thereby causing
+collisions.
+
+### How to contribute a Python package to nixpkgs? {#tools}
+
+Packages inside nixpkgs must use the [`buildPythonPackage`](#buildpythonpackage-function) or [`buildPythonApplication`](#buildpythonapplication-function) function directly,
+because we can only provide security support for non-vendored dependencies.
+
+We recommend [nix-init](https://github.com/nix-community/nix-init) for creating new python packages within nixpkgs,
+as it already prefetches the source, parses dependencies for common formats and prefills most things in `meta`.
+
+### Are Python interpreters built deterministically? {#deterministic-builds}
+
+The Python interpreters are now built deterministically. Minor modifications had
+to be made to the interpreters in order to generate deterministic bytecode. This
+has security implications and is relevant for those using Python in a
+`nix-shell`.
+
+When the environment variable `DETERMINISTIC_BUILD` is set, all bytecode will
+have timestamp 1. The [`buildPythonPackage`](#buildpythonpackage-function) function sets `DETERMINISTIC_BUILD=1`
+and [PYTHONHASHSEED=0](https://docs.python.org/3.11/using/cmdline.html#envvar-PYTHONHASHSEED).
+Both are also exported in `nix-shell`.
+
+### How to provide automatic tests to Python packages? {#automatic-tests}
+
+It is recommended to test packages as part of the build process.
+Source distributions (`sdist`) often include test files, but not always.
+
+By default the command `python setup.py test` is run as part of the
+[`checkPhase`](#ssec-check-phase), but often it is necessary to pass a custom [`checkPhase`](#ssec-check-phase). An
+example of such a situation is when `py.test` is used.
+
+#### Common issues {#common-issues}
+
+* Non-working tests can often be deselected. By default [`buildPythonPackage`](#buildpythonpackage-function)
+  runs `python setup.py test`, which is deprecated. Most Python modules, however,
+  do follow the standard test protocol where the pytest runner can be used
+  instead. `pytest` supports the `-k` and `--ignore` parameters to ignore test
+  methods or classes as well as whole files. For `pytestCheckHook` these are
+  conveniently exposed as `disabledTests` and `disabledTestPaths` respectively.
+
+  ```nix
+  buildPythonPackage {
+    # ...
+    nativeCheckInputs = [
+      pytestCheckHook
+    ];
+
+    disabledTests = [
+      "function_name"
+      "other_function"
+    ];
+
+    disabledTestPaths = [
+      "this/file.py"
+    ];
+  }
+  ```
+
+* Tests that attempt to access `$HOME` can be fixed by using the following
+  work-around before running tests (e.g. `preCheck`): `export HOME=$(mktemp -d)`
+* Compiling with Cython causes tests to fail with a `ModuleNotFoundError`.
+  This can be fixed with two changes in the derivation: 1) replacing `pytest` with
+  `pytestCheckHook` and 2) adding a `preCheck` containing `cd $out` to run
+  tests within the built output, as in the sketch below.
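+
+  A minimal sketch of that work-around (package details omitted):
+
+  ```nix
+  buildPythonPackage {
+    # ...
+    nativeCheckInputs = [
+      pytestCheckHook
+    ];
+
+    # run the tests against the installed output so the compiled
+    # extension modules can be imported
+    preCheck = ''
+      cd $out
+    '';
+  }
+  ```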
+
+## Contributing {#contributing}
+
+### Contributing guidelines {#contributing-guidelines}
+
+The following rules should be respected:
+
+* Python libraries are called from `python-packages.nix` and packaged with
+  [`buildPythonPackage`](#buildpythonpackage-function). The expression of a library should be in
+  `pkgs/development/python-modules/<name>/default.nix`.
+* Python applications live outside of `python-packages.nix` and are packaged
+  with [`buildPythonApplication`](#buildpythonapplication-function).
+* Make sure libraries build for all Python interpreters.
+* By default we enable tests. Make sure the tests are found and, in the case of
+  libraries, are passing for all interpreters. If certain tests fail they can be
+  disabled individually. Try to avoid disabling the tests altogether. In any
+  case, when you disable tests, leave a comment explaining why.
+* Commit names of Python libraries should reflect that they are Python
+  libraries, so write for example `python311Packages.numpy: 1.11 -> 1.12`.
+  It is highly recommended to specify the current default version to enable
+  automatic build by ofborg.
+* Attribute names in `python-packages.nix` as well as `pname`s should match the
+  library's name on PyPI, but be normalized according to [PEP
+  0503](https://www.python.org/dev/peps/pep-0503/#normalized-names). This means
+  that characters should be converted to lowercase and `.` and `_` should be
+  replaced by a single `-` (`foo-bar-baz` instead of `Foo__Bar.baz`).
+  If necessary, `pname` has to be given a different value within `fetchPypi`.
+* Packages from sources such as GitHub and GitLab that do not exist on PyPI
+  should not use a name that is already used on PyPI. When possible, they should
+  use the package repository name prefixed with the owner (e.g. organization) name
+  and using a `-` as delimiter.
+* Attribute names in `python-packages.nix` should be sorted alphanumerically to
+  avoid merge conflicts and ease locating attributes.
+
+## Package set maintenance {#python-package-set-maintenance}
+
+The whole Python package set has a lot of packages that do not see regular
+updates, because they either are a very fragile component in the Python
+ecosystem, like for example the `hypothesis` package, or packages that have
+no maintainer, so maintenance falls back to the package set maintainers.
+
+### Updating packages in bulk {#python-package-bulk-updates}
+
+There is a tool to update a lot of Python libraries in bulk; it lives at
+`maintainers/scripts/update-python-libraries` within this repository.
+
+It can quickly update minor or major versions for all packages selected
+and create update commits, and supports the `fetchPypi`, `fetchurl` and
+`fetchFromGitHub` fetchers. When updating lots of packages that are
+hosted on GitHub, exporting a `GITHUB_API_TOKEN` is highly recommended.
+
+Updating packages in bulk leads to lots of breakages, which is why a
+stabilization period on the `python-updates` branch is required.
+
+If a package is fragile and often breaks during these bulk updates, it
+may be reasonable to set `passthru.skipBulkUpdate = true` in the
+derivation. This decision should not be made on a whim and should
+always be supported by a qualifying comment.
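+
+For example (a sketch; the comment should describe the concrete breakage):
+
+```nix
+{
+  # Upstream's test suite breaks on every minor bump of its dependencies;
+  # update this package manually instead.
+  passthru.skipBulkUpdate = true;
+}
+```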
+
+Once the branch is sufficiently stable it should normally be merged
+into the `staging` branch.
+
+An exemplary call to update all python libraries between minor versions
+would be:
+
+```ShellSession
+$ maintainers/scripts/update-python-libraries --target minor --commit --use-pkgs-prefix pkgs/development/python-modules/**/default.nix
+```
+
+## CPython Update Schedule {#python-cpython-update-schedule}
+
+With [PEP 602](https://www.python.org/dev/peps/pep-0602/), CPython now
+follows a yearly release cadence. In nixpkgs, all supported interpreters
+are made available, but only the package sets of the two most
+recent interpreters are built; this is a compromise between offering
+the latest interpreter and what the majority of Python packages support.
+
+New CPython interpreters are released in October. Generally, it takes some
+time for the majority of active Python projects to support the latest stable
+interpreter. To help ease the migration for Nixpkgs users
+between Python interpreters the schedule below will be used:
+
+| When | Event |
+| --- | --- |
+| After YY.11 Release | Bump CPython package set window. The latest and previous latest stable should now be built. |
+| After YY.05 Release | Bump default CPython interpreter to latest stable. |
+
+In practice, this means that the Python community will have had a stable interpreter
+for ~2 months before we attempt to update the package set, and it leaves
+~7 months for Python applications to support the latest interpreter.
diff --git a/nixpkgs/doc/languages-frameworks/qt.section.md b/nixpkgs/doc/languages-frameworks/qt.section.md
new file mode 100644
index 000000000000..dcec4b6fff42
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/qt.section.md
@@ -0,0 +1,81 @@
+# Qt {#sec-language-qt}
+
+Writing Nix expressions for Qt libraries and applications is largely similar to doing so for other C++ software.
+This section assumes some knowledge of the latter.
+
+The major caveat with Qt applications is that Qt uses a plugin system to load additional modules at runtime.
+In Nixpkgs, we wrap Qt applications to inject environment variables telling Qt where to discover the required plugins and QML modules.
+
+This effectively makes the runtime dependencies pure and explicit at build-time, at the cost of introducing
+an extra indirection.
+
+## Nix expression for a Qt package (default.nix) {#qt-default-nix}
+
+```nix
+{ stdenv, qt6 }:
+
+stdenv.mkDerivation {
+  pname = "myapp";
+  version = "1.0";
+
+  buildInputs = [ qt6.qtbase ];
+  nativeBuildInputs = [ qt6.wrapQtAppsHook ];
+}
+```
+
+The same goes for Qt 5 where libraries and tools are under `libsForQt5`.
+
+Any Qt package should include `wrapQtAppsHook` in `nativeBuildInputs`, or explicitly set `dontWrapQtApps` to bypass generating the wrappers.
+
+::: {.note}
+Qt 6 graphical applications should also include `qtwayland` in `buildInputs` on Linux (but not on platforms e.g. Darwin, where `qtwayland` is not available), to ensure the Wayland platform plugin is available.
+
+This may become default in the future, see [NixOS/nixpkgs#269674](https://github.com/NixOS/nixpkgs/pull/269674).
+:::
+
+## Packages supporting multiple Qt versions {#qt-versions}
+
+If your package is a library that can be built with multiple Qt versions, you may want to take Qt modules as separate arguments (`qtbase`, `qtdeclarative` etc.), and invoke the package from `pkgs/top-level/qt5-packages.nix` or `pkgs/top-level/qt6-packages.nix` using the respective `callPackage` functions.
+
+Applications should generally be built with upstream's preferred Qt version.
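+
+A minimal sketch of such a version-agnostic library expression (the package name and build system are illustrative):
+
+```nix
+{ stdenv, cmake, qtbase, qtdeclarative, wrapQtAppsHook }:
+
+stdenv.mkDerivation {
+  pname = "mylib";
+  version = "1.0";
+  src = ./.;
+
+  # qtbase and qtdeclarative resolve to Qt 5 or Qt 6 modules depending on
+  # whether the file is called from qt5-packages.nix or qt6-packages.nix
+  nativeBuildInputs = [ cmake wrapQtAppsHook ];
+  buildInputs = [ qtbase qtdeclarative ];
+}
+```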
+
+## Locating additional runtime dependencies {#qt-runtime-dependencies}
+
+Add entries to `qtWrapperArgs` to modify the wrappers created by
+`wrapQtAppsHook`:
+
+```nix
+{ stdenv, qt6 }:
+
+stdenv.mkDerivation {
+  # ...
+  nativeBuildInputs = [ qt6.wrapQtAppsHook ];
+  qtWrapperArgs = [ ''--prefix PATH : /path/to/bin'' ];
+}
+```
+
+The entries are passed as arguments to [wrapProgram](#fun-wrapProgram).
+
+If you need more control over the wrapping process, set `dontWrapQtApps` to disable automatic wrapper generation,
+and then create wrappers manually in `fixupPhase`, using `wrapQtApp`, which itself is a small wrapper over [wrapProgram](#fun-wrapProgram).
+
+The `makeWrapper` arguments required for Qt are also exposed in the environment as `$qtWrapperArgs`.
+
+```nix
+{ stdenv, lib, wrapQtAppsHook }:
+
+stdenv.mkDerivation {
+  # ...
+  nativeBuildInputs = [ wrapQtAppsHook ];
+  dontWrapQtApps = true;
+  preFixup = ''
+    wrapQtApp "$out/bin/myapp" --prefix PATH : /path/to/bin
+  '';
+}
+```
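+
+A hypothetical sketch of reusing those arguments to wrap an additional file by hand (`$out/bin/myscript` is an assumed path):
+
+```nix
+{
+  preFixup = ''
+    # reuse the Qt-specific wrapper arguments collected by the hook
+    wrapProgram "$out/bin/myscript" "''${qtWrapperArgs[@]}"
+  '';
+}
+```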
+
+::: {.note}
+`wrapQtAppsHook` ignores files that are non-ELF executables.
+This means that scripts won't be automatically wrapped so you'll need to manually wrap them as previously mentioned.
+An example of when you'd always need to do this is with Python applications that use PyQt.
+:::
diff --git a/nixpkgs/doc/languages-frameworks/r.section.md b/nixpkgs/doc/languages-frameworks/r.section.md
new file mode 100644
index 000000000000..ad0fb10987c9
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/r.section.md
@@ -0,0 +1,127 @@
+# R {#r}
+
+## Installation {#installation}
+
+Define an environment for R that contains all the libraries that you'd like to
+use by adding the following snippet to your `$HOME/.config/nixpkgs/config.nix` file:
+
+```nix
+{
+    packageOverrides = super: let self = super.pkgs; in
+    {
+
+        rEnv = super.rWrapper.override {
+            packages = with self.rPackages; [
+                devtools
+                ggplot2
+                reshape2
+                yaml
+                optparse
+                ];
+        };
+    };
+}
+```
+
+Then you can use `nix-env -f "<nixpkgs>" -iA rEnv` to install it into your user
+profile. The set of available libraries can be discovered by running the
+command `nix-env -f "<nixpkgs>" -qaP -A rPackages`. The first column from that
+output is the name that has to be passed to `rWrapper` in the code snippet above.
+
+However, if you'd like to add a file to your project source to make the
+environment available for other contributors, you can create a `default.nix`
+file like so:
+
+```nix
+with import <nixpkgs> {};
+{
+  myProject = stdenv.mkDerivation {
+    name = "myProject";
+    version = "1";
+    src = if lib.inNixShell then null else nix;
+
+    buildInputs = with rPackages; [
+      R
+      ggplot2
+      knitr
+    ];
+  };
+}
+```
+and then run `nix-shell .` to be dropped into a shell with those packages
+available.
+
+## RStudio {#rstudio}
+
+RStudio uses a standard set of packages and ignores any custom R
+environments or installed packages you may have.  To create a custom
+environment, see `rstudioWrapper`, which functions similarly to
+`rWrapper`:
+
+```nix
+{
+    packageOverrides = super: let self = super.pkgs; in
+    {
+
+        rstudioEnv = super.rstudioWrapper.override {
+            packages = with self.rPackages; [
+                dplyr
+                ggplot2
+                reshape2
+                ];
+        };
+    };
+}
+```
+
+Then like above, `nix-env -f "<nixpkgs>" -iA rstudioEnv` will install
+this into your user profile.
+
+Alternatively, you can create a self-contained `shell.nix` without the need to
+modify any configuration files:
+
+```nix
+{ pkgs ? import <nixpkgs> {}
+}:
+
+pkgs.rstudioWrapper.override {
+  packages = with pkgs.rPackages; [ dplyr ggplot2 reshape2 ];
+}
+
+```
+
+Executing `nix-shell` will then drop you into an environment equivalent to the
+one above. If you need additional packages just add them to the list and
+re-enter the shell.
+
+## Updating the package set {#updating-the-package-set}
+
+There is a script and associated environment for regenerating the package
+sets and synchronising the rPackages tree to the current CRAN and matching
+BIOC release. These scripts are found in the `pkgs/development/r-modules`
+directory and executed as follows:
+
+```bash
+nix-shell generate-shell.nix
+
+Rscript generate-r-packages.R cran  > cran-packages.nix.new
+mv cran-packages.nix.new cran-packages.nix
+
+Rscript generate-r-packages.R bioc  > bioc-packages.nix.new
+mv bioc-packages.nix.new bioc-packages.nix
+
+Rscript generate-r-packages.R bioc-annotation > bioc-annotation-packages.nix.new
+mv bioc-annotation-packages.nix.new bioc-annotation-packages.nix
+
+Rscript generate-r-packages.R bioc-experiment > bioc-experiment-packages.nix.new
+mv bioc-experiment-packages.nix.new bioc-experiment-packages.nix
+```
+
+`generate-r-packages.R <repo>` reads `<repo>-packages.nix`, which is why the
+output is first written to a `.new` file and then renamed.
+
+Some packages require overrides to specify external dependencies or other
+patches and special requirements. These overrides are specified in the
+`pkgs/development/r-modules/default.nix` file. As the `*-packages.nix`
+contents are automatically generated, they should not be edited by hand;
+broken builds should be addressed using overrides.
diff --git a/nixpkgs/doc/languages-frameworks/ruby.section.md b/nixpkgs/doc/languages-frameworks/ruby.section.md
new file mode 100644
index 000000000000..7dede6944a3d
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/ruby.section.md
@@ -0,0 +1,296 @@
+# Ruby {#sec-language-ruby}
+
+## Using Ruby {#using-ruby}
+
+Several versions of Ruby interpreters are available on Nix, as well as over 250 gems and many applications written in Ruby. The attribute `ruby` refers to the default Ruby interpreter, which is currently MRI 3.1. It's also possible to refer to specific versions, e.g. `ruby_3_y`, `jruby`, or `mruby`.
+
+In the Nixpkgs tree, Ruby packages can be found throughout, depending on what they do, and are called from the main package set. Ruby gems, however, live in separate sets, and there's one default set for each interpreter (currently MRI only).
+
+There are two main approaches for using Ruby with gems. One is to use a specifically locked `Gemfile` for an application that has very strict dependencies. The other is to depend on the common gems, which we'll explain further down, and rely on them being updated regularly.
+
+The interpreters have common attributes, namely `gems`, and `withPackages`. So you can refer to `ruby.gems.nokogiri`, or `ruby_3_2.gems.nokogiri` to get the Nokogiri gem already compiled and ready to use.
+
+Since not all gems have executables like `nokogiri`, it's usually more convenient to use the `withPackages` function like this: `ruby.withPackages (p: with p; [ nokogiri ])`. This will also make sure that the Ruby in your environment will be able to find the gem and it can be used in your Ruby code (for example via `ruby` or `irb` executables) via `require "nokogiri"` as usual.
+
+### Temporary Ruby environment with `nix-shell` {#temporary-ruby-environment-with-nix-shell}
+
+Rather than having a single Ruby environment shared by all Ruby development projects on a system, Nix allows you to create separate environments per project. `nix-shell` gives you the possibility to temporarily load another environment akin to a combined `chruby` or `rvm` and `bundle exec`.
+
+There are two methods for loading a shell with Ruby packages. The first and recommended method is to create an environment with `ruby.withPackages` and load that.
+
+```ShellSession
+$ nix-shell -p "ruby.withPackages (ps: with ps; [ nokogiri pry ])"
+```
+
+The other method, which is not recommended, is to create an environment and list all the packages directly.
+
+```ShellSession
+$ nix-shell -p ruby.gems.nokogiri ruby.gems.pry
+```
+
+Again, it's possible to launch the interpreter from the shell. The Ruby interpreter has the attribute `gems` which contains all Ruby gems for that specific interpreter.
+
+#### Load Ruby environment from `.nix` expression {#load-ruby-environment-from-.nix-expression}
+
+As explained [in the `nix-shell` section](https://nixos.org/manual/nix/stable/command-ref/nix-shell) of the Nix manual, `nix-shell` can also load an expression from a `.nix` file.
+Say we want to have Ruby, `nokogiri`, and `pry`. Consider a `shell.nix` file with:
+
+```nix
+with import <nixpkgs> {};
+ruby.withPackages (ps: with ps; [ nokogiri pry ])
+```
+
+What's happening here?
+
+1. We begin with importing the Nix Packages collections. `import <nixpkgs>` imports the `<nixpkgs>` function, `{}` calls it and the `with` statement brings all attributes of `nixpkgs` in the local scope. These attributes form the main package set.
+2. Then we create a Ruby environment with the `withPackages` function.
+3. The `withPackages` function expects us to provide a function as an argument that takes the set of all ruby gems and returns a list of packages to include in the environment. Here, we select the packages `nokogiri` and `pry` from the package set.
+
+#### Execute command with `--run` {#execute-command-with---run}
+
+A convenient flag for `nix-shell` is `--run`. It executes a command in the `nix-shell`. We can e.g. directly open a `pry` REPL:
+
+```ShellSession
+$ nix-shell -p "ruby.withPackages (ps: with ps; [ nokogiri pry ])" --run "pry"
+```
+
+Or immediately require `nokogiri` in pry:
+
+```ShellSession
+$ nix-shell -p "ruby.withPackages (ps: with ps; [ nokogiri pry ])" --run "pry -rnokogiri"
+```
+
+Or run a script using this environment:
+
+```ShellSession
+$ nix-shell -p "ruby.withPackages (ps: with ps; [ nokogiri pry ])" --run "ruby example.rb"
+```
+
+#### Using `nix-shell` as shebang {#using-nix-shell-as-shebang}
+
+In fact, for the last case, there is a more convenient method. You can add a [shebang](<https://en.wikipedia.org/wiki/Shebang_(Unix)>) to your script specifying which dependencies `nix-shell` needs. With the following shebang, you can just execute `./example.rb`, and it will run with all dependencies.
+
+```ruby
+#! /usr/bin/env nix-shell
+#! nix-shell -i ruby -p "ruby.withPackages (ps: with ps; [ nokogiri rest-client ])"
+
+require 'nokogiri'
+require 'rest-client'
+
+body = RestClient.get('http://example.com').body
+puts Nokogiri::HTML(body).at('h1').text
+```
+
+## Developing with Ruby {#developing-with-ruby}
+
+### Using an existing Gemfile {#using-an-existing-gemfile}
+
+In most cases, you'll already have a `Gemfile.lock` listing all your dependencies. This can be used to generate a `gemset.nix`, which is used to fetch the gems and combine them into a single environment. The reason why you need to have a separate file for this is that Nix requires you to have a checksum for each input to your build. Since the `Gemfile.lock` that `bundler` generates doesn't provide us with checksums, we have to first download each gem, calculate its SHA256, and store it in this separate file.
+
+So the steps from having just a `Gemfile` to a `gemset.nix` are:
+
+```ShellSession
+$ bundle lock
+$ bundix
+```
+
+If you already have a `Gemfile.lock`, you can run `bundix` and it will work the same.
+
+To update the gems in your `Gemfile.lock`, you may use the `bundix -l` flag, which will create a new `Gemfile.lock` in case the `Gemfile` has a more recent time of modification.
+
+Once the `gemset.nix` is generated, it can be used in a `bundlerEnv` derivation. Here is an example you could use for your `shell.nix`:
+
+```nix
+# ...
+let
+  gems = bundlerEnv {
+    name = "gems-for-some-project";
+    gemdir = ./.;
+  };
+in mkShell { packages = [ gems gems.wrappedRuby ]; }
+```
+
+With this file in your directory, you can run `nix-shell` to build and use the gems. The important parts here are `bundlerEnv` and `wrappedRuby`.
+
+The `bundlerEnv` is a wrapper over all the gems in your gemset. This means that all the `/lib` and `/bin` directories will be available, and the executables of all gems (even of indirect dependencies) will end up in your `$PATH`. The `wrappedRuby` provides you with all executables that come with Ruby itself, but wrapped so they can easily find the gems in your gemset.
+
+One common issue that you might have is that you have Ruby, but also `bundler` in your gemset. That leads to a conflict for `/bin/bundle` and `/bin/bundler`. You can resolve this by wrapping either your Ruby or your gems in a `lowPrio` call. So in order to give the `bundler` from your gemset priority, it would be used like this:
+
+```nix
+# ...
+mkShell { buildInputs = [ gems (lowPrio gems.wrappedRuby) ]; }
+```
+
+Sometimes a Gemfile references other files. Such as `.ruby-version` or vendored gems. When copying the Gemfile to the nix store we need to copy those files alongside. This can be done using `extraConfigPaths`. For example:
+
+```nix
+{
+  gems = bundlerEnv {
+    name = "gems-for-some-project";
+    gemdir = ./.;
+    extraConfigPaths = [ "${./.}/.ruby-version" ];
+  };
+}
+```
+
+### Gem-specific configurations and workarounds {#gem-specific-configurations-and-workarounds}
+
+In some cases, especially if the gem has native extensions, you might need to modify the way the gem is built.
+
+This is done via a common configuration file that includes all of the workarounds for each gem.
+
+This file lives at `/pkgs/development/ruby-modules/gem-config/default.nix`, since it already contains a lot of entries, it should be pretty easy to add the modifications you need for your needs.
+
+In the meanwhile, or if the modification is for a private gem, you can also add the configuration to only your own environment.
+
+Two places that allow this modification are the `ruby` derivation, or `bundlerEnv`.
+
+Here's the `ruby` one:
+
+```nix
+{ pg_version ? "10", pkgs ? import <nixpkgs> { } }:
+let
+  myRuby = pkgs.ruby.override {
+    defaultGemConfig = pkgs.defaultGemConfig // {
+      pg = attrs: {
+        buildFlags =
+        [ "--with-pg-config=${pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
+      };
+    };
+  };
+in myRuby.withPackages (ps: with ps; [ pg ])
+```
+
+And an example with `bundlerEnv`:
+
+```nix
+{ pg_version ? "10", pkgs ? import <nixpkgs> { } }:
+let
+  gems = pkgs.bundlerEnv {
+    name = "gems-for-some-project";
+    gemdir = ./.;
+    gemConfig = pkgs.defaultGemConfig // {
+      pg = attrs: {
+        buildFlags =
+        [ "--with-pg-config=${pkgs."postgresql_${pg_version}"}/bin/pg_config" ];
+      };
+    };
+  };
+in mkShell { buildInputs = [ gems gems.wrappedRuby ]; }
+```
+
+And finally via overlays:
+
+```nix
+{ pg_version ? "10" }:
+let
+  pkgs = import <nixpkgs> {
+    overlays = [
+      (self: super: {
+        defaultGemConfig = super.defaultGemConfig // {
+          pg = attrs: {
+            buildFlags = [
+              "--with-pg-config=${
+                pkgs."postgresql_${pg_version}"
+              }/bin/pg_config"
+            ];
+          };
+        };
+      })
+    ];
+  };
+in pkgs.ruby.withPackages (ps: with ps; [ pg ])
+```
+
+Then we can get whichever postgresql version we desire and the `pg` gem will always reference it correctly:
+
+```ShellSession
+$ nix-shell --argstr pg_version 9_4 --run 'ruby -rpg -e "puts PG.library_version"'
+90421
+
+$ nix-shell --run 'ruby -rpg -e "puts PG.library_version"'
+100007
+```
+
+Of course for this use-case one could also use overlays since the configuration for `pg` depends on the `postgresql` alias, but for demonstration purposes this has to suffice.
+
+### Platform-specific gems {#ruby-platform-specif-gems}
+
+Right now, bundix has some issues with pre-built, platform-specific gems: [bundix PR #68](https://github.com/nix-community/bundix/pull/68).
+Until this is solved, you can tell bundler to not use platform-specific gems and instead build them from source each time:
+- globally (will be set in `~/.config/.bundle/config`):
+```shell
+$ bundle config set force_ruby_platform true
+```
+- locally (will be set in `<project-root>/.bundle/config`):
+```shell
+$ bundle config set --local force_ruby_platform true
+```
+
+### Adding a gem to the default gemset {#adding-a-gem-to-the-default-gemset}
+
+Now that you know how to get a working Ruby environment with Nix, it's time to go forward and start actually developing with Ruby. We will first have a look at how Ruby gems are packaged on Nix. Then, we will look at how you can use development mode with your code.
+
+All gems in the standard set are automatically generated from a single `Gemfile`. The dependency resolution is done with `bundler` and makes it more likely that all gems are compatible to each other.
+
+In order to add a new gem to nixpkgs, you can put it into the `/pkgs/development/ruby-modules/with-packages/Gemfile` and run `./maintainers/scripts/update-ruby-packages`.
+
+To test that it works, you can then try using the gem with:
+
+```shell
+NIX_PATH=nixpkgs=$PWD nix-shell -p "ruby.withPackages (ps: with ps; [ name-of-your-gem ])"
+```
+
+### Packaging applications {#packaging-applications}
+
+A common task is to add a Ruby executable to nixpkgs; popular examples are `chef`, `jekyll`, or `sass`. A good way to do that is to use the `bundlerApp` function, which allows you to make a package that only exposes the listed executables; otherwise the package may cause conflicts through common paths like `bin/rake` or `bin/bundler` that aren't meant to be used.
+
+The absolute easiest way to do that is to write a `Gemfile` along these lines:
+
+```ruby
+source 'https://rubygems.org' do
+  gem 'mdl'
+end
+```
+
+If you want to package a specific version, you can use the standard Gemfile syntax for that, e.g. `gem 'mdl', '0.5.0'`, but if you want the latest stable version anyway, it's easier to update by running the `bundle lock` and `bundix` steps again.
+
+Now you can also make a `default.nix` that looks like this:
+
+```nix
+{ bundlerApp }:
+
+bundlerApp {
+  pname = "mdl";
+  gemdir = ./.;
+  exes = [ "mdl" ];
+}
+```
+
+All that's left to do is to generate the corresponding `Gemfile.lock` and `gemset.nix` as described above in the `Using an existing Gemfile` section.
+
+#### Packaging executables that require wrapping {#packaging-executables-that-require-wrapping}
+
+Sometimes your app will depend on other executables at runtime and try to find them through the `PATH` environment variable.
+
+In this case, you can provide a `postBuild` hook to `bundlerApp` that wraps the gem in another script that prefixes the `PATH`.
+
+Of course you could also make a custom `gemConfig` if you know exactly how to patch it, but it's usually much easier to maintain with a simple wrapper so the patch doesn't have to be adjusted for each version.
+
+Here's another example:
+
+```nix
+{ lib, bundlerApp, makeWrapper, git, gnutar, gzip }:
+
+bundlerApp {
+  pname = "r10k";
+  gemdir = ./.;
+  exes = [ "r10k" ];
+
+  nativeBuildInputs = [ makeWrapper ];
+
+  postBuild = ''
+    wrapProgram $out/bin/r10k --prefix PATH : ${lib.makeBinPath [ git gnutar gzip ]}
+  '';
+}
+```
diff --git a/nixpkgs/doc/languages-frameworks/rust.section.md b/nixpkgs/doc/languages-frameworks/rust.section.md
new file mode 100644
index 000000000000..8a1007b7bb8a
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/rust.section.md
@@ -0,0 +1,1082 @@
+# Rust {#rust}
+
+To install the Rust compiler and Cargo, put
+
+```nix
+{
+  environment.systemPackages = [
+    rustc
+    cargo
+  ];
+}
+```
+
+into your `configuration.nix` or bring them into scope with `nix-shell -p rustc cargo`.
+
+For other versions such as daily builds (beta and nightly),
+use either `rustup` from nixpkgs (which will manage the Rust installation in your home directory)
+or [community maintained Rust toolchains](#using-community-maintained-rust-toolchains).
+
+## `buildRustPackage`: Compiling Rust applications with Cargo {#compiling-rust-applications-with-cargo}
+
+Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:
+
+```nix
+{ lib, fetchFromGitHub, rustPlatform }:
+
+rustPlatform.buildRustPackage rec {
+  pname = "ripgrep";
+  version = "12.1.1";
+
+  src = fetchFromGitHub {
+    owner = "BurntSushi";
+    repo = pname;
+    rev = version;
+    hash = "sha256-+s5RBC3XSgb8omTbUNLywZnP6jSxZBKSS1BmXOjRF8M=";
+  };
+
+  cargoHash = "sha256-jtBw4ahSl88L0iuCXxQgZVm1EcboWRJMNtjxLVTtzts=";
+
+  meta = {
+    description = "A fast line-oriented regex search tool, similar to ag and ack";
+    homepage = "https://github.com/BurntSushi/ripgrep";
+    license = lib.licenses.unlicense;
+    maintainers = [];
+  };
+}
+```
+
+`buildRustPackage` requires either a `cargoHash` (preferred) or a
+`cargoSha256` attribute, computed over all crate sources of this package.
+`cargoHash` supports [SRI](https://www.w3.org/TR/SRI/) hashes and should be
+preferred over `cargoSha256` which was used for traditional Nix SHA-256 hashes.
+For example:
+
+```nix
+{
+  cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
+}
+```
+
+Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
+approach will not work, and you will need to copy the `Cargo.lock` file of the application
+to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
+section.
+
+
+Both types of hashes are permitted when contributing to nixpkgs. The
+Cargo hash is obtained by inserting a fake checksum into the
+expression and building the package once. The correct checksum can
+then be taken from the failed build. A fake hash can be used for
+`cargoHash` as follows:
+
+```nix
+{
+  cargoHash = lib.fakeHash;
+}
+```
+
+For `cargoSha256` you can use:
+
+```nix
+{
+  cargoSha256 = lib.fakeSha256;
+}
+```
+
+Per the instructions in the [Cargo Book](https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html)
+best practices guide, Rust applications should always commit the `Cargo.lock`
+file in git to ensure a reproducible build. However, a few packages do not, and
+Nix depends on this file, so if it is missing you can use `cargoPatches` to
+apply it in the `patchPhase`. Consider sending a PR upstream with a note to the
+maintainer describing why it's important to include it in the application.
+
+The fetcher will verify that the `Cargo.lock` file is in sync with the `src`
+attribute, and fail the build if not. It will also compress the vendor
+directory into a tar.gz archive.
+
+The tarball with vendored dependencies contains a directory with the
+package's `name`, which is normally composed of `pname` and
+`version`. This means that the vendored dependencies hash
+(`cargoHash`/`cargoSha256`) is dependent on the package name and
+version. The `cargoDepsName` attribute can be used to use another name
+for the directory of vendored dependencies. For example, the hash can
+be made invariant to the version by setting `cargoDepsName` to
+`pname`:
+
+```nix
+rustPlatform.buildRustPackage rec {
+  pname = "broot";
+  version = "1.2.0";
+
+  src = fetchCrate {
+    inherit pname version;
+    hash = "sha256-aDQA4A5mScX9or3Lyiv/5GyAehidnpKKE0grhbP1Ctc=";
+  };
+
+  cargoHash = "sha256-tbrTbutUs5aPSV+yE0IBUZAAytgmZV7Eqxia7g+9zRs=";
+  cargoDepsName = pname;
+
+  # ...
+}
+```
+
+### Importing a `Cargo.lock` file {#importing-a-cargo.lock-file}
+
+Using a vendored hash (`cargoHash`/`cargoSha256`) is tedious when using
+`buildRustPackage` within a project, since it requires that the hash
+is updated after every change to `Cargo.lock`. Therefore,
+`buildRustPackage` also supports vendoring dependencies directly from
+a `Cargo.lock` file using the `cargoLock` argument. For example:
+
+```nix
+rustPlatform.buildRustPackage {
+  pname = "myproject";
+  version = "1.0.0";
+
+  cargoLock = {
+    lockFile = ./Cargo.lock;
+  };
+
+  # ...
+}
+```
+
+This will retrieve the dependencies using fixed-output derivations from
+the specified lockfile.
+
+One caveat is that `Cargo.lock` cannot be patched in the `patchPhase`
+because it runs after the dependencies have already been fetched. If
+you need to patch or generate the lockfile you can alternatively set
+`cargoLock.lockFileContents` to a string of its contents:
+
+```nix
+rustPlatform.buildRustPackage {
+  pname = "myproject";
+  version = "1.0.0";
+
+  cargoLock = let
+    # `f` is a placeholder for whatever fixup you need to apply to the
+    # lockfile text; replace it with an actual transformation function.
+    fixupLockFile = path: f (builtins.readFile path);
+  in {
+    lockFileContents = fixupLockFile ./Cargo.lock;
+  };
+
+  # ...
+}
+```
+
+Note that setting `cargoLock.lockFile` or `cargoLock.lockFileContents`
+doesn't add a `Cargo.lock` to your `src`, and a `Cargo.lock` is still
+required to build a rust package. A simple fix is to use:
+
+```nix
+{
+  postPatch = ''
+    ln -s ${./Cargo.lock} Cargo.lock
+  '';
+}
+```
+
+The output hash of each dependency that uses a git source must be
+specified in the `outputHashes` attribute. For example:
+
+```nix
+rustPlatform.buildRustPackage rec {
+  pname = "myproject";
+  version = "1.0.0";
+
+  cargoLock = {
+    lockFile = ./Cargo.lock;
+    outputHashes = {
+      "finalfusion-0.14.0" = "17f4bsdzpcshwh74w5z119xjy2if6l2wgyjy56v621skr2r8y904";
+    };
+  };
+
+  # ...
+}
+```
+
+If you do not specify an output hash for a git dependency, building
+the package will fail and inform you of which crate needs to be
+added. To find the correct hash, you can first use `lib.fakeSha256` or
+`lib.fakeHash` as a stub hash. Building the package (and thus the
+vendored dependencies) will then inform you of the correct hash.
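+
+For instance, a temporary stub for the crate from the example above might look like this (replace `lib.fakeHash` with the hash reported by the failing build):
+
+```nix
+{
+  cargoLock = {
+    lockFile = ./Cargo.lock;
+    outputHashes = {
+      # stub hash; build once, then copy the correct hash from the error message
+      "finalfusion-0.14.0" = lib.fakeHash;
+    };
+  };
+}
+```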
+
+For usage outside nixpkgs, `allowBuiltinFetchGit` could be used to
+avoid having to specify `outputHashes`. For example:
+
+```nix
+rustPlatform.buildRustPackage rec {
+  pname = "myproject";
+  version = "1.0.0";
+
+  cargoLock = {
+    lockFile = ./Cargo.lock;
+    allowBuiltinFetchGit = true;
+  };
+
+  # ...
+}
+```
+
+### Cargo features {#cargo-features}
+
+You can disable default features using `buildNoDefaultFeatures`, and
+extra features can be added with `buildFeatures`.
+
+If you want to use different features for the check phase, you can use
+`checkNoDefaultFeatures` and `checkFeatures`. They are only passed to
+`cargo test` and not `cargo build`. If left unset, they default to
+`buildNoDefaultFeatures` and `buildFeatures`.
+
+For example:
+
+```nix
+rustPlatform.buildRustPackage rec {
+  pname = "myproject";
+  version = "1.0.0";
+
+  buildNoDefaultFeatures = true;
+  buildFeatures = [ "color" "net" ];
+
+  # disable network features in tests
+  checkFeatures = [ "color" ];
+
+  # ...
+}
+```
+
+### Cross compilation {#cross-compilation}
+
+By default, Rust packages are compiled for the host platform, just like any
+other package is.  The `--target` passed to rust tools is computed from this.
+By default, it takes the `stdenv.hostPlatform.config` and replaces components
+where they are known to differ. But there are ways to customize the argument:
+
+ - To choose a different target by name, define
+   `stdenv.hostPlatform.rustc.config` as that name (a string), and that
+   name will be used instead.
+
+   For example:
+
+   ```nix
+   import <nixpkgs> {
+     crossSystem = (import <nixpkgs/lib>).systems.examples.armhf-embedded // {
+       rustc.config = "thumbv7em-none-eabi";
+     };
+   }
+   ```
+
+   will result in:
+
+   ```shell
+   --target thumbv7em-none-eabi
+   ```
+
+ - To pass a completely custom target, define
+   `stdenv.hostPlatform.rustc.config` with its name, and
+   `stdenv.hostPlatform.rustc.platform` with the value.  The value will be
+   serialized to JSON in a file called
+   `${stdenv.hostPlatform.rustc.config}.json`, and the path of that file
+   will be used instead.
+
+   For example:
+
+   ```nix
+   import <nixpkgs> {
+     crossSystem = (import <nixpkgs/lib>).systems.examples.armhf-embedded // {
+       rustc.config = "thumb-crazy";
+       rustc.platform = { foo = ""; bar = ""; };
+     };
+   }
+   ```
+
+   will result in:
+
+   ```shell
+   --target /nix/store/asdfasdfsadf-thumb-crazy.json # contains {"foo":"","bar":""}
+   ```
+
+Note that currently custom targets aren't compiled with `std`, so `cargo test`
+will fail. This can be ignored by adding `doCheck = false;` to your derivation.
+
+### Running package tests {#running-package-tests}
+
+When using `buildRustPackage`, the `checkPhase` is enabled by default and runs
+`cargo test` on the package to build. To make sure that we don't compile the
+sources twice and to actually test the artifacts that will be used at runtime,
+the tests will be run in `release` mode by default.
+
+However, in some cases the test-suite of a package doesn't work properly in the
+`release` mode. For these situations, the mode for `checkPhase` can be changed like
+so:
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  checkType = "debug";
+}
+```
+
+Please note that the code will be compiled twice here: once in `release` mode
+for the `buildPhase`, and again in `debug` mode for the `checkPhase`.
+
+Test flags, e.g., `--package foo`, can be passed to `cargo test` via the
+`cargoTestFlags` attribute.
+
+Another attribute, called `checkFlags`, is used to pass arguments to the test
+binary itself, as stated
+[here](https://doc.rust-lang.org/cargo/commands/cargo-test.html).
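+
+For instance, a minimal sketch (the package name `foo` is hypothetical):
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  # only run the tests of the `foo` workspace member
+  cargoTestFlags = [ "--package" "foo" ];
+}
+```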
+
+#### Tests relying on the structure of the `target/` directory {#tests-relying-on-the-structure-of-the-target-directory}
+
+Some tests may rely on the structure of the `target/` directory. Those tests
+are likely to fail because we use `cargo --target` during the build. This means that
+the artifacts
+[are stored in `target/<architecture>/release/`](https://doc.rust-lang.org/cargo/guide/build-cache.html),
+rather than in `target/release/`.
+
+This can only be worked around by patching the affected tests accordingly.
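+
+As a rough sketch, such a patch could be applied in `postPatch` (the file name, the replacement, and the target triple are purely illustrative):
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  postPatch = ''
+    # Illustrative only: point a test that hardcodes target/release at the
+    # target-specific directory used by this build.
+    substituteInPlace tests/cli.rs \
+      --replace-fail "target/release" "target/x86_64-unknown-linux-gnu/release"
+  '';
+}
+```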
+
+#### Disabling package-tests {#disabling-package-tests}
+
+In some instances, it may be necessary to disable testing altogether (with `doCheck = false;`):
+
+* If no tests exist -- the `checkPhase` should be explicitly disabled to skip
+  unnecessary build steps to speed up the build.
+* If tests are highly impure (e.g. due to network usage).
+
+There will obviously be some corner-cases not listed above where it's sensible to disable tests.
+The above are just guidelines, and exceptions may be granted on a case-by-case basis.
+
+However, please check if it's possible to disable a problematic subset of the
+test suite and leave a comment explaining your reasoning.
+
+This can be achieved with `--skip` in `checkFlags`:
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  checkFlags = [
+    # reason for disabling test
+    "--skip=example::tests:example_test"
+  ];
+}
+```
+
+#### Using `cargo-nextest` {#using-cargo-nextest}
+
+Tests can be run with [cargo-nextest](https://github.com/nextest-rs/nextest)
+by setting `useNextest = true`. The same options still apply, but nextest
+accepts a different set of arguments and the settings might need to be
+adapted to be compatible with cargo-nextest.
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  useNextest = true;
+}
+```
+
+#### Setting `test-threads` {#setting-test-threads}
+
+`buildRustPackage` will use parallel test threads by default;
+sometimes it may be necessary to disable this so the tests run consecutively.
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  dontUseCargoParallelTests = true;
+}
+```
+
+### Building a package in `debug` mode {#building-a-package-in-debug-mode}
+
+By default, `buildRustPackage` will use `release` mode for builds. If a package
+should be built in `debug` mode, it can be configured like so:
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  buildType = "debug";
+}
+```
+
+In this scenario, the `checkPhase` will be run in `debug` mode as well.
+
+### Custom `build`/`install`-procedures {#custom-buildinstall-procedures}
+
+Some packages may use custom scripts for building/installing, e.g. with a `Makefile`.
+In these cases, it's recommended to override the `buildPhase`/`installPhase`/`checkPhase`.
+
+Otherwise, some steps may fail because of the modified directory structure of `target/`.
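+
+A rough sketch for a package driven by a `Makefile` (the targets and variables are hypothetical):
+
+```nix
+rustPlatform.buildRustPackage {
+  /* ... */
+  buildPhase = ''
+    runHook preBuild
+    make
+    runHook postBuild
+  '';
+
+  installPhase = ''
+    runHook preInstall
+    make install PREFIX=$out
+    runHook postInstall
+  '';
+}
+```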
+
+### Building a crate with an absent or out-of-date Cargo.lock file {#building-a-crate-with-an-absent-or-out-of-date-cargo.lock-file}
+
+`buildRustPackage` needs a `Cargo.lock` file to get all dependencies in the
+source code in a reproducible way. If it is missing or out-of-date one can use
+the `cargoPatches` attribute to update or add it.
+
+```nix
+rustPlatform.buildRustPackage rec {
+  # ...
+  cargoPatches = [
+    # a patch file to add/update Cargo.lock in the source code
+    ./add-Cargo.lock.patch
+  ];
+}
+```
+
+### Compiling non-Rust packages that include Rust code {#compiling-non-rust-packages-that-include-rust-code}
+
+Several non-Rust packages incorporate Rust code for performance- or
+security-sensitive parts. `rustPlatform` exposes several functions and
+hooks that can be used to integrate Cargo in non-Rust packages.
+
+#### Vendoring of dependencies {#vendoring-of-dependencies}
+
+Since network access is not allowed in sandboxed builds, Rust crate
+dependencies need to be retrieved using a fetcher. `rustPlatform`
+provides the `fetchCargoTarball` fetcher, which vendors all
+dependencies of a crate. For example, given a source path `src`
+containing `Cargo.toml` and `Cargo.lock`, `fetchCargoTarball`
+can be used as follows:
+
+```nix
+{
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src;
+    hash = "sha256-BoHIN/519Top1NUBjpB/oEMqi86Omt3zTQcXFWqrek0=";
+  };
+}
+```
+
+The `src` attribute is required, as well as a hash specified through
+the `hash` attribute. The following optional attributes can
+also be used:
+
+* `name`: the name that is used for the dependencies tarball.  If
+  `name` is not specified, then the name `cargo-deps` will be used.
+* `sourceRoot`: when the `Cargo.lock`/`Cargo.toml` are in a
+  subdirectory, `sourceRoot` specifies the relative path to these
+  files.
+* `patches`: patches to apply before vendoring. This is useful when
+  the `Cargo.lock`/`Cargo.toml` files need to be patched before
+  vendoring.
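+
+A sketch combining these optional attributes (the name, subdirectory, and patch are illustrative):
+
+```nix
+{
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src;
+    # name used for the vendored dependencies tarball
+    name = "myproject-vendor";
+    # Cargo.toml/Cargo.lock live in a subdirectory of src
+    sourceRoot = "${src.name}/rust";
+    # patch Cargo.lock before vendoring
+    patches = [ ./update-cargo-lock.patch ];
+    hash = lib.fakeHash; # replace after the first build
+  };
+}
+```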
+
+If a `Cargo.lock` file is available, you can alternatively use the
+`importCargoLock` function. In contrast to `fetchCargoTarball`, this
+function does not require a hash (unless git dependencies are used)
+and fetches every dependency as a separate fixed-output derivation.
+`importCargoLock` can be used as follows:
+
+```nix
+{
+  cargoDeps = rustPlatform.importCargoLock {
+    lockFile = ./Cargo.lock;
+  };
+}
+```
+
+If the `Cargo.lock` file includes git dependencies, then their output
+hashes need to be specified since they are not available through the
+lock file. For example:
+
+```nix
+{
+  cargoDeps = rustPlatform.importCargoLock {
+    lockFile = ./Cargo.lock;
+    outputHashes = {
+      "rand-0.8.3" = "0ya2hia3cn31qa8894s3av2s8j5bjwb6yq92k0jsnlx7jid0jwqa";
+    };
+  };
+}
+```
+
+If you do not specify an output hash for a git dependency, building
+`cargoDeps` will fail and inform you of which crate needs to be
+added. To find the correct hash, you can first use `lib.fakeSha256` or
+`lib.fakeHash` as a stub hash. Building `cargoDeps` will then inform
+you of the correct hash.
+
+#### Hooks {#hooks}
+
+`rustPlatform` provides the following hooks to automate Cargo builds:
+
+* `cargoSetupHook`: configure Cargo to use dependencies vendored
+  through `fetchCargoTarball`. This hook uses the `cargoDeps`
+  environment variable to find the vendored dependencies. If a project
+  already vendors its dependencies, the variable `cargoVendorDir` can
+  be used instead. When the `Cargo.toml`/`Cargo.lock` files are not in
+  `sourceRoot`, then the optional `cargoRoot` is used to specify the
+  Cargo root directory relative to `sourceRoot`.
+* `cargoBuildHook`: use Cargo to build a crate. If the crate to be
+  built is a crate in e.g. a Cargo workspace, the relative path to the
+  crate to build can be set through the optional `buildAndTestSubdir`
+  environment variable. Features can be specified with
+  `cargoBuildNoDefaultFeatures` and `cargoBuildFeatures`. Additional
+  Cargo build flags can be passed through `cargoBuildFlags`.
+* `maturinBuildHook`: use [Maturin](https://github.com/PyO3/maturin)
+  to build a Python wheel. Similar to `cargoBuildHook`, the optional
+  variable `buildAndTestSubdir` can be used to build a crate in a
+  Cargo workspace. Additional Maturin flags can be passed through
+  `maturinBuildFlags`.
+* `cargoCheckHook`: run tests using Cargo. The build type for checks
+  can be set using `cargoCheckType`. Features can be specified with
+  `cargoCheckNoDefaultFeatures` and `cargoCheckFeatures`. Additional
+  flags can be passed to the tests using `checkFlags` and
+  `checkFlagsArray`. By default, tests are run in parallel. This can
+  be disabled by setting `dontUseCargoParallelTests`.
+* `cargoNextestHook`: run tests using
+  [cargo-nextest](https://github.com/nextest-rs/nextest). The same
+  options as for `cargoCheckHook` also apply to `cargoNextestHook`.
+* `cargoInstallHook`: install binaries and static/shared libraries
+  that were built using `cargoBuildHook`.
+* `bindgenHook`: for crates which use `bindgen` as a build dependency, lets
+  `bindgen` find `libclang` and `libclang` find the libraries in `buildInputs`.
+
+#### Examples {#examples}
+
+#### Python package using `setuptools-rust` {#python-package-using-setuptools-rust}
+
+For Python packages using `setuptools-rust`, you can use
+`fetchCargoTarball` and `cargoSetupHook` to retrieve and set up Cargo
+dependencies. The build itself is then performed by
+`buildPythonPackage`.
+
+The following example outlines how the `tokenizers` Python package is
+built. Since the Python package is in the `source/bindings/python`
+directory of the `tokenizers` project's source archive, we use
+`sourceRoot` to point the tooling to this directory:
+
+```nix
+{ fetchFromGitHub
+, buildPythonPackage
+, cargo
+, rustPlatform
+, rustc
+, setuptools-rust
+}:
+
+buildPythonPackage rec {
+  pname = "tokenizers";
+  version = "0.10.0";
+
+  src = fetchFromGitHub {
+    owner = "huggingface";
+    repo = pname;
+    rev = "python-v${version}";
+    hash = "sha256-rQ2hRV52naEf6PvRsWVCTN7B1oXAQGmnpJw4iIdhamw=";
+  };
+
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src sourceRoot;
+    name = "${pname}-${version}";
+    hash = "sha256-miW//pnOmww2i6SOGbkrAIdc/JMDT4FJLqdMFojZeoY=";
+  };
+
+  sourceRoot = "${src.name}/bindings/python";
+
+  nativeBuildInputs = [
+    cargo
+    rustPlatform.cargoSetupHook
+    rustc
+    setuptools-rust
+  ];
+
+  # ...
+}
+```
+
+In some projects, the Rust crate is not in the main Python source
+directory.  In such cases, the `cargoRoot` attribute can be used to
+specify the crate's directory relative to `sourceRoot`. In the
+following example, the crate is in `src/rust`, as specified in the
+`cargoRoot` attribute. Note that we also need to specify the correct
+path for `fetchCargoTarball`.
+
+```nix
+
+{ buildPythonPackage
+, fetchPypi
+, rustPlatform
+, setuptools-rust
+, openssl
+}:
+
+buildPythonPackage rec {
+  pname = "cryptography";
+  version = "3.4.2"; # Also update the hash in vectors.nix
+
+  src = fetchPypi {
+    inherit pname version;
+    hash = "sha256-xGDilsjLOnls3MfVbGKnj80KCUCczZxlis5PmHzpNcQ=";
+  };
+
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src;
+    sourceRoot = "${pname}-${version}/${cargoRoot}";
+    name = "${pname}-${version}";
+    hash = "sha256-PS562W4L1NimqDV2H0jl5vYhL08H9est/pbIxSdYVfo=";
+  };
+
+  cargoRoot = "src/rust";
+
+  # ...
+}
+```
+
+#### Python package using `maturin` {#python-package-using-maturin}
+
+Python packages that use [Maturin](https://github.com/PyO3/maturin)
+can be built with `fetchCargoTarball`, `cargoSetupHook`, and
+`maturinBuildHook`. For example, the following (partial) derivation
+builds the `retworkx` Python package. `fetchCargoTarball` and
+`cargoSetupHook` are used to fetch and set up the crate dependencies.
+`maturinBuildHook` is used to perform the build.
+
+```nix
+{ lib
+, buildPythonPackage
+, rustPlatform
+, fetchFromGitHub
+}:
+
+buildPythonPackage rec {
+  pname = "retworkx";
+  version = "0.6.0";
+
+  src = fetchFromGitHub {
+    owner = "Qiskit";
+    repo = "retworkx";
+    rev = version;
+    hash = "sha256-11n30ldg3y3y6qxg3hbj837pnbwjkqw3nxq6frds647mmmprrd20=";
+  };
+
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src;
+    name = "${pname}-${version}";
+    hash = "sha256-heOBK8qi2nuc/Ib+I/vLzZ1fUUD/G/KTw9d7M4Hz5O0=";
+  };
+
+  format = "pyproject";
+
+  nativeBuildInputs = with rustPlatform; [ cargoSetupHook maturinBuildHook ];
+
+  # ...
+}
+```
+
+#### Rust package built with `meson` {#rust-package-built-with-meson}
+
+Some projects, especially GNOME applications, are built with the Meson Build System instead of calling Cargo directly. Using `rustPlatform.buildRustPackage` may successfully build the main program, but related files will be missing. Instead, you need to set up Cargo dependencies with `fetchCargoTarball` and `cargoSetupHook` and leave the rest to Meson. `rustc` and `cargo` are still needed in `nativeBuildInputs` for Meson to use.
+
+```nix
+{ lib
+, stdenv
+, fetchFromGitLab
+, meson
+, ninja
+, pkg-config
+, rustPlatform
+, rustc
+, cargo
+, wrapGAppsHook4
+, blueprint-compiler
+, libadwaita
+, libsecret
+, tracker
+}:
+
+stdenv.mkDerivation rec {
+  pname = "health";
+  version = "0.95.0";
+
+  src = fetchFromGitLab {
+    domain = "gitlab.gnome.org";
+    owner = "World";
+    repo = "health";
+    rev = version;
+    hash = "sha256-PrNPprSS98yN8b8yw2G6hzTSaoE65VbsM3q7FVB4mds=";
+  };
+
+  cargoDeps = rustPlatform.fetchCargoTarball {
+    inherit src;
+    name = "${pname}-${version}";
+    hash = "sha256-8fa3fa+sFi5H+49B5sr2vYPkp9C9s6CcE0zv4xB8gww=";
+  };
+
+  nativeBuildInputs = [
+    meson
+    ninja
+    pkg-config
+    rustPlatform.cargoSetupHook
+    rustc
+    cargo
+    wrapGAppsHook4
+    blueprint-compiler
+  ];
+
+  buildInputs = [
+    libadwaita
+    libsecret
+    tracker
+  ];
+
+  # ...
+}
+```
+
+## `buildRustCrate`: Compiling Rust crates using Nix instead of Cargo {#compiling-rust-crates-using-nix-instead-of-cargo}
+
+### Simple operation {#simple-operation}
+
+When run, `cargo build` produces a file called `Cargo.lock`,
+containing pinned versions of all dependencies. Nixpkgs contains a
+tool called `crate2nix` (`nix-shell -p crate2nix`), which can be
+used to turn a `Cargo.lock` into a Nix expression.  That Nix
+expression calls `rustc` directly (hence bypassing Cargo), and can
+be used to compile a crate and all its dependencies.
+
+See [`crate2nix`'s documentation](https://github.com/kolloch/crate2nix#known-restrictions)
+for instructions on how to use it.
+
+### Handling external dependencies {#handling-external-dependencies}
+
+Some crates require external libraries. For crates from
+[crates.io](https://crates.io), such libraries can be specified in
+the `defaultCrateOverrides` attribute set in nixpkgs itself.
+
+Starting from that set, one can add more overrides to add features
+or build inputs, for example by overriding the `hello` crate in a separate file:
+
+```nix
+with import <nixpkgs> {};
+((import ./hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    hello = attrs: { buildInputs = [ openssl ]; };
+  };
+}
+```
+
+Here, `crateOverrides` is expected to be an attribute set, where the
+key is the crate name without version number and the value is a function.
+The function gets all attributes passed to `buildRustCrate` as its first
+argument and returns a set containing all attributes that should be
+overridden.
+
+For more complicated cases, such as when parts of the crate's
+derivation depend on the crate's version, the `attrs` argument of
+the override above can be read, as in the following example, which
+patches the derivation:
+
+```nix
+with import <nixpkgs> {};
+((import ./hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0")  {
+      postPatch = ''
+        substituteInPlace lib/zoneinfo.rs \
+          --replace-fail "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
+      '';
+    };
+  };
+}
+```
+
+Another situation is when we want to override a nested
+dependency. This actually works in the exact same way, since the
+`crateOverrides` parameter is forwarded to the crate's
+dependencies. For instance, to override the build inputs for crate
+`libc` in the example above, where `libc` is a dependency of the main
+crate, we could do:
+
+```nix
+with import <nixpkgs> {};
+((import ./hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    libc = attrs: { buildInputs = []; };
+  };
+}
+```
+
+### Options and phases configuration {#options-and-phases-configuration}
+
+Actually, the overrides introduced in the previous section are more
+general. A number of other parameters can be overridden:
+
+- The version of `rustc` used to compile the crate:
+
+  ```nix
+  (hello {}).override { rust = pkgs.rust; }
+  ```
+
+- Whether to build in release mode or debug mode (release mode by
+  default):
+
+  ```nix
+  (hello {}).override { release = false; }
+  ```
+
+- Whether to print the commands sent to `rustc` when building
+  (equivalent to `--verbose` in cargo):
+
+  ```nix
+  (hello {}).override { verbose = false; }
+  ```
+
+- Extra arguments to be passed to `rustc`:
+
+  ```nix
+  (hello {}).override { extraRustcOpts = "-Z debuginfo=2"; }
+  ```
+
+- Phases, just like in any other derivation, can be specified using
+  the following attributes: `preUnpack`, `postUnpack`, `prePatch`,
+  `patches`, `postPatch`, `preConfigure` (in the case of a Rust crate,
+  this is run before calling the "build" script), `postConfigure`
+  (after the "build" script), `preBuild`, `postBuild`, `preInstall` and
+  `postInstall`. As an example, here is how to create a new module
+  before running the build script:
+
+  ```nix
+  (hello {}).override {
+    preConfigure = ''
+       # `hi` is assumed to be another derivation available in scope
+       echo "pub const PATH=\"${hi.out}\";" >> src/path.rs
+    '';
+  }
+  ```
+
+### Setting Up `nix-shell` {#setting-up-nix-shell}
+
+Oftentimes you want to develop code from within `nix-shell`. Unfortunately
+`buildRustCrate` does not support common `nix-shell` operations directly
+(see [this issue](https://github.com/NixOS/nixpkgs/issues/37945))
+so we will use `stdenv.mkDerivation` instead.
+
+Using the example `hello` project above, we want to do the following:
+
+- Have access to `cargo` and `rustc`
+- Have the `openssl` library available to a crate through its _normal_
+  compilation mechanism (`pkg-config`).
+
+A typical `shell.nix` might look like:
+
+```nix
+with import <nixpkgs> {};
+
+stdenv.mkDerivation {
+  name = "rust-env";
+  nativeBuildInputs = [
+    rustc cargo
+
+    # Example Build-time Additional Dependencies
+    pkg-config
+  ];
+  buildInputs = [
+    # Example Run-time Additional Dependencies
+    openssl
+  ];
+
+  # Set Environment Variables
+  RUST_BACKTRACE = 1;
+}
+```
+
+You should now be able to run the following:
+
+```ShellSession
+$ nix-shell --pure
+$ cargo build
+$ cargo test
+```
+
+## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains}
+
+::: {.note}
+The following projects cannot be used within Nixpkgs since [Import From Derivation](https://nixos.org/manual/nix/unstable/language/import-from-derivation) (IFD) is disallowed in Nixpkgs.
+To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack.
+:::
+
+There are two community maintained approaches to Rust toolchain management:
+- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
+- [fenix](https://github.com/nix-community/fenix)
+
+Despite their names, both projects provide a similar set of packages and overlays under different APIs.
+
+Oxalica's overlay allows you to select a particular Rust version without you providing a hash or a flake input,
+but comes with a larger git repository than fenix.
+
+Fenix also provides rust-analyzer nightly in addition to the Rust toolchains.
+
+Both oxalica's overlay and fenix integrate better with Nix and its caching.
+Because of this and their ergonomics, either of those community projects
+should be preferred to Mozilla's Rust overlay ([nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla)).
+
+The following documentation demonstrates examples using fenix and oxalica's Rust overlay
+with `nix-shell` and building derivations. More advanced usages like flake usage
+are documented in their own repositories.
+
+### Using Rust nightly with `nix-shell` {#using-rust-nightly-with-nix-shell}
+
+Here is a simple `shell.nix` that provides Rust nightly (default profile) using fenix:
+
+```nix
+with import <nixpkgs> { };
+let
+  fenix = callPackage
+    (fetchFromGitHub {
+      owner = "nix-community";
+      repo = "fenix";
+      # commit from: 2023-03-03
+      rev = "e2ea04982b892263c4d939f1cc3bf60a9c4deaa1";
+      hash = "sha256-AsOim1A8KKtMWIxG+lXh5Q4P2bhOZjoUhFWJ1EuZNNk=";
+    })
+    { };
+in
+mkShell {
+  name = "rust-env";
+  nativeBuildInputs = [
+    # Note: to use stable, just replace `default` with `stable`
+    fenix.default.toolchain
+
+    # Example Build-time Additional Dependencies
+    pkg-config
+  ];
+  buildInputs = [
+    # Example Run-time Additional Dependencies
+    openssl
+  ];
+
+  # Set Environment Variables
+  RUST_BACKTRACE = 1;
+}
+```
+
+Save this to `shell.nix`, then run `nix-shell` and check the compiler version:
+
+```ShellSession
+$ rustc --version
+rustc 1.69.0-nightly (13471d3b2 2023-03-02)
+```
+
+The nightly version string confirms that you are using the nightly toolchain.
+
+Oxalica's Rust overlay has more complete examples of `shell.nix` (and cross compilation) under its
+[`examples` directory](https://github.com/oxalica/rust-overlay/tree/e53e8853aa7b0688bc270e9e6a681d22e01cf299/examples).
+
+### Using Rust nightly in a derivation with `buildRustPackage` {#using-rust-nightly-in-a-derivation-with-buildrustpackage}
+
+You can also use Rust nightly to build rust packages using `makeRustPlatform`.
+The below snippet demonstrates invoking `buildRustPackage` with a Rust toolchain from oxalica's overlay:
+
+```nix
+with import <nixpkgs>
+{
+  overlays = [
+    (import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
+  ];
+};
+let
+  rustPlatform = makeRustPlatform {
+    cargo = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
+    rustc = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
+  };
+in
+
+rustPlatform.buildRustPackage rec {
+  pname = "ripgrep";
+  version = "12.1.1";
+
+  src = fetchFromGitHub {
+    owner = "BurntSushi";
+    repo = "ripgrep";
+    rev = version;
+    hash = "sha256-+s5RBC3XSgb8omTbUNLywZnP6jSxZBKSS1BmXOjRF8M=";
+  };
+
+  cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
+
+  doCheck = false;
+
+  meta = {
+    description = "A fast line-oriented regex search tool, similar to ag and ack";
+    homepage = "https://github.com/BurntSushi/ripgrep";
+    license = with lib.licenses; [ mit unlicense ];
+    maintainers = with lib.maintainers; [];
+  };
+}
+```
+
+To try that snippet:
+1. Save the above snippet as `default.nix` in a new directory.
+2. `cd` into that directory and run `nix-build`.
+
+Fenix also has examples with `buildRustPackage`,
+[crane](https://github.com/ipetkov/crane),
+[naersk](https://github.com/nix-community/naersk),
+and cross compilation in its [Examples](https://github.com/nix-community/fenix#examples) section.
+
+## Using `git bisect` on the Rust compiler {#using-git-bisect-on-the-rust-compiler}
+
+Sometimes an upgrade of the Rust compiler (`rustc`) will break a
+downstream package.  In these situations, being able to `git bisect`
+the `rustc` version history to find the offending commit is quite
+useful.  Nixpkgs makes it easy to do this.
+
+First, roll back your nixpkgs to a commit whose `rustc` is *the most
+recent one which doesn't have the problem*. You'll need to do this
+because of `rustc`'s extremely aggressive version-pinning.
+
+Next, add the following overlay, updating the Rust version to the
+one in your rolled-back nixpkgs, and replacing `/git/scratch/rust`
+with the path into which you have `git clone`d the `rustc` git
+repository:
+
+```nix
+ (final: prev: /*lib.optionalAttrs prev.stdenv.targetPlatform.isAarch64*/ {
+   rust_1_72 =
+     lib.updateManyAttrsByPath [{
+       path = [ "packages" "stable" ];
+       update = old: old.overrideScope(final: prev: {
+         rustc-unwrapped = prev.rustc-unwrapped.overrideAttrs (_: {
+           src = lib.cleanSource /git/scratch/rust;
+           # do *not* put passthru.isReleaseTarball=true here
+         });
+       });
+     }]
+       prev.rust_1_72;
+ })
+```
+
+If the problem you're troubleshooting only manifests when
+cross-compiling you can uncomment the `lib.optionalAttrs` in the
+example above, and replace `isAarch64` with the target that is
+having problems.  This will speed up your bisect quite a bit, since
+the host compiler won't need to be rebuilt.
+
+Now, you can start a `git bisect` in the directory where you checked
+out the `rustc` source code.  It is recommended to select the
+endpoint commits by searching backwards from `origin/master` for the
+*commits which added the release notes for the versions in
+question.*  If you set the endpoints to commits on the release
+branches (i.e. the release tags), git-bisect will often get confused
+by the complex merge-commit structures it will need to traverse.
+
+The command loop you'll want to use for bisecting looks like this:
+
+```bash
+git bisect {good,bad}  # depending on result of last build
+git submodule update --init
+CARGO_NET_OFFLINE=false cargo vendor \
+  --sync ./src/tools/cargo/Cargo.toml \
+  --sync ./src/tools/rust-analyzer/Cargo.toml \
+  --sync ./compiler/rustc_codegen_cranelift/Cargo.toml \
+  --sync ./src/bootstrap/Cargo.toml
+nix-build $NIXPKGS -A package-broken-by-rust-changes
+```
+
+The `git submodule update --init` and `cargo vendor` commands above
+require network access, so they can't be performed from within the
+`rustc` derivation, unfortunately.
diff --git a/nixpkgs/doc/languages-frameworks/swift.section.md b/nixpkgs/doc/languages-frameworks/swift.section.md
new file mode 100644
index 000000000000..88d98deeb2dd
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/swift.section.md
@@ -0,0 +1,184 @@
+# Swift {#swift}
+
+The Swift compiler is provided by the `swift` package:
+
+```sh
+# Compile and link a simple executable.
+nix-shell -p swift --run 'swiftc -' <<< 'print("Hello world!")'
+# Run it!
+./main
+```
+
+The `swift` package also provides the `swift` command, with some caveats:
+
+- Swift Package Manager (SwiftPM) is packaged separately as `swiftpm`. If you
+  need functionality like `swift build`, `swift run`, `swift test`, you must
+  also add the `swiftpm` package to your closure.
+- On Darwin, the `swift repl` command requires an Xcode installation. This is
+  because it uses the system LLDB debugserver, which has special entitlements.
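+
+For example, to experiment with SwiftPM commands in an ad-hoc shell, include both packages (this assumes a `Package.swift` in the current directory):
+
+```sh
+nix-shell -p swift swiftpm --run 'swift build'
+```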
+
+## Module search paths {#ssec-swift-module-search-paths}
+
+Like other toolchains in Nixpkgs, the Swift compiler executables are wrapped
+to help Swift find your application's dependencies in the Nix store. These
+wrappers scan the `buildInputs` of your package derivation for specific
+directories where Swift modules are placed by convention, and automatically
+add those directories to the Swift compiler search paths.
+
+Swift follows different conventions depending on the platform. The wrappers
+look for the following directories:
+
+- On Darwin platforms: `lib/swift/macosx`
+  (If not targeting macOS, replace `macosx` with the Xcode platform name.)
+- On other platforms: `lib/swift/linux/x86_64`
+  (Where `linux` and `x86_64` are from lowercase `uname -sm`.)
+- For convenience, Nixpkgs also adds `lib/swift` to the search path.
+  This can save a bit of work packaging Swift modules, because many Nix builds
+  will produce output for just one target anyway.
+
+## Core libraries {#ssec-swift-core-libraries}
+
+In addition to the standard library, the Swift toolchain contains some
+additional 'core libraries' that, on Apple platforms, are normally distributed
+as part of the OS or Xcode. These are packaged separately in Nixpkgs, and can
+be found (for use in `buildInputs`) as:
+
+- `swiftPackages.Dispatch`
+- `swiftPackages.Foundation`
+- `swiftPackages.XCTest`
+
+## Packaging with SwiftPM {#ssec-swift-packaging-with-swiftpm}
+
+Nixpkgs includes a small helper `swiftpm2nix` that can fetch your SwiftPM
+dependencies for you, when you need to write a Nix expression to package your
+application.
+
+The first step is to run the generator:
+
+```sh
+cd /path/to/my/project
+# Enter a Nix shell with the required tools.
+nix-shell -p swift swiftpm swiftpm2nix
+# First, make sure the workspace is up-to-date.
+swift package resolve
+# Now generate the Nix code.
+swiftpm2nix
+```
+
+This produces some files in a directory `nix`, which will be part of your Nix
+expression. The next step is to write that expression:
+
+```nix
+{ stdenv, swift, swiftpm, swiftpm2nix, fetchFromGitHub }:
+
+let
+  # Pass the generated files to the helper.
+  generated = swiftpm2nix.helpers ./nix;
+in
+
+stdenv.mkDerivation rec {
+  pname = "myproject";
+  version = "0.0.0";
+
+  src = fetchFromGitHub {
+    owner = "nixos";
+    repo = pname;
+    rev = version;
+    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+  };
+
+  # Including SwiftPM as a nativeBuildInput provides a buildPhase for you.
+  # This by default performs a release build using SwiftPM, essentially:
+  #   swift build -c release
+  nativeBuildInputs = [ swift swiftpm ];
+
+  # The helper provides a configure snippet that will prepare all dependencies
+  # in the correct place, where SwiftPM expects them.
+  configurePhase = generated.configure;
+
+  installPhase = ''
+    # This is a special function that invokes swiftpm to find the location
+    # of the binaries it produced.
+    binPath="$(swiftpmBinPath)"
+    # Now perform any installation steps.
+    mkdir -p $out/bin
+    cp $binPath/myproject $out/bin/
+  '';
+}
+```
+
+### Custom build flags {#ssec-swiftpm-custom-build-flags}
+
+If you'd like to build a different configuration than `release`:
+
+```nix
+{
+  swiftpmBuildConfig = "debug";
+}
+```
+
+It is also possible to provide additional flags to `swift build`:
+
+```nix
+{
+  swiftpmFlags = [ "--disable-dead-strip" ];
+}
+```
+
+The default `buildPhase` already passes `-j` for parallel building.
+
+If these two customization options are insufficient, provide your own
+`buildPhase` that invokes `swift build`.
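+
+A minimal sketch of such a phase (the flags are illustrative):
+
+```nix
+{
+  buildPhase = ''
+    runHook preBuild
+    swift build -c release -j "$NIX_BUILD_CORES" --disable-dead-strip
+    runHook postBuild
+  '';
+}
+```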
+
+### Running tests {#ssec-swiftpm-running-tests}
+
+Including `swiftpm` in your `nativeBuildInputs` also provides a default
+`checkPhase`, but it must be enabled with:
+
+```nix
+{
+  doCheck = true;
+}
+```
+
+This essentially runs: `swift test -c release`
+
+### Patching dependencies {#ssec-swiftpm-patching-dependencies}
+
+In some cases, it may be necessary to patch a SwiftPM dependency. SwiftPM
+dependencies are located in `.build/checkouts`, but the `swiftpm2nix` helper
+provides these as symlinks to read-only `/nix/store` paths. In order to patch
+them, we need to make them writable.
+
+A special function `swiftpmMakeMutable` is available to replace the symlink
+with a writable copy:
+
+```nix
+{
+  configurePhase = generated.configure + ''
+    # Replace the dependency symlink with a writable copy.
+    swiftpmMakeMutable swift-crypto
+    # Now apply a patch.
+    patch -p1 -d .build/checkouts/swift-crypto -i ${./some-fix.patch}
+  '';
+}
+```
+
+## Considerations for custom build tools {#ssec-swift-considerations-for-custom-build-tools}
+
+### Linking the standard library {#ssec-swift-linking-the-standard-library}
+
+The `swift` package has a separate `lib` output containing just the Swift
+standard library, to prevent Swift applications needing a dependency on the
+full Swift compiler at run-time. Linking with the Nixpkgs Swift toolchain
+already ensures binaries correctly reference the `lib` output.
+
+Sometimes, Swift is used only to compile part of a mixed codebase, and the
+link step is manual. Custom build tools often locate the standard library
+relative to the `swift` compiler executable, and while the result will work,
+if this path ends up in the binary, the binary will have the Swift compiler as an
+unintended dependency.
+
+In this case, you should investigate how your build process discovers the
+standard library, and override the path. The correct path will be something
+like: `"${swift.swift.lib}/${swift.swiftModuleSubdir}"`
diff --git a/nixpkgs/doc/languages-frameworks/texlive.section.md b/nixpkgs/doc/languages-frameworks/texlive.section.md
new file mode 100644
index 000000000000..b6fb1099a4a4
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/texlive.section.md
@@ -0,0 +1,230 @@
+# TeX Live {#sec-language-texlive}
+
+Since release 15.09 there is a new TeX Live packaging that lives entirely under the attribute `texlive`.
+
+## User's guide (experimental new interface) {#sec-language-texlive-user-guide-experimental}
+
+Release 23.11 ships with a new interface that will eventually replace `texlive.combine`.
+
+- For basic usage, use some of the prebuilt environments available at the top level, such as `texliveBasic`, `texliveSmall`. For the full list of prebuilt environments, inspect `texlive.schemes`.
+
+- Packages cannot be used directly but must be assembled in an environment. To create or add packages to an environment, use
+  ```nix
+  texliveSmall.withPackages (ps: with ps; [ collection-langkorean algorithms cm-super ])
+  ```
+  The function `withPackages` can be called multiple times to add more packages.
+
+  - **Note.** Within Nixpkgs, packages should only use prebuilt environments as inputs, such as `texliveSmall` or `texliveInfraOnly`, and should not depend directly on `texlive`. Further dependencies should be added by calling `withPackages`. This is to ensure that there is a consistent and simple way to override the inputs.
+
+- `texlive.withPackages` uses the same logic as `buildEnv`. Only parts of a package are installed in an environment: its 'runtime' files (`tex` output), binaries (`out` output), and support files (`tlpkg` output). Moreover, man and info pages are assembled into separate `man` and `info` outputs. To add only the TeX files of a package, or its documentation (`texdoc` output), just specify the outputs:
+  ```nix
+  texlive.withPackages (ps: with ps; [
+    texdoc # recommended package to navigate the documentation
+    perlPackages.LaTeXML.tex # tex files of LaTeXML, omit binaries
+    cm-super
+    cm-super.texdoc # documentation of cm-super
+  ])
+  ```
+
+- All packages distributed by TeX Live, which contains most of CTAN, are available and can be found under `texlive.pkgs`:
+  ```ShellSession
+  $ nix repl
+  nix-repl> :l <nixpkgs>
+  nix-repl> texlive.pkgs.[TAB]
+  ```
+  Note that the packages in `texlive.pkgs` are only provided for search purposes and must not be used directly.
+
+- **Experimental and subject to change without notice:** to add the documentation for all packages in the environment, use
+  ```nix
+  texliveSmall.__overrideTeXConfig { withDocs = true; }
+  ```
+  This can be applied before or after calling `withPackages`.
+
+  The function currently supports the parameters `withDocs`, `withSources`, and `requireTeXPackages`.
+
+## User's guide {#sec-language-texlive-user-guide}
+
+- For basic usage just pull `texlive.combined.scheme-basic` for an environment with basic LaTeX support.
+
+- It typically won't work to use separately installed packages together. Instead, you can build a custom set of packages like this. Most CTAN packages should be available:
+
+  ```nix
+  texlive.combine {
+    inherit (texlive) scheme-small collection-langkorean algorithms cm-super;
+  }
+  ```
+
+- There are all the schemes, collections and a few thousand packages, as defined upstream (perhaps with tiny differences).
+
+- By default you only get executables and files needed during runtime, and a little documentation for the core packages. To change that, you need to add a `pkgFilter` function to `combine`.
+
+  ```nix
+  texlive.combine {
+    # inherit (texlive) whatever-you-want;
+    pkgFilter = pkg:
+      pkg.tlType == "run" || pkg.tlType == "bin" || pkg.hasManpages || pkg.pname == "cm-super";
+    # elem tlType [ "run" "bin" "doc" "source" ]
+    # there are also other attributes: version, name
+  }
+  ```
+
+- You can list packages, e.g. with `nix repl`.
+
+  ```ShellSession
+  $ nix repl
+  nix-repl> :l <nixpkgs>
+  nix-repl> texlive.collection-[TAB]
+  ```
+
+- Note that the wrapper assumes that the result has a chance to be useful. For example, the core executables should be present, as well as some core data files. The supported way of ensuring this is by including some scheme, for example `scheme-basic`, into the combination.
+
+- TeX Live packages are also available under `texlive.pkgs` as derivations with outputs `out`, `tex`, `texdoc`, `texsource`, `tlpkg`, `man`, `info`. They cannot be installed outside of `texlive.combine` but are available for other uses. To repackage a font, for instance, use
+
+  ```nix
+  stdenvNoCC.mkDerivation rec {
+    src = texlive.pkgs.iwona;
+
+    inherit (src) pname version;
+
+    installPhase = ''
+      runHook preInstall
+      install -Dm644 fonts/opentype/nowacki/iwona/*.otf -t $out/share/fonts/opentype
+      runHook postInstall
+    '';
+  }
+  ```
+
+  See `biber`, `iwona` for complete examples.
+
+## Custom packages {#sec-language-texlive-custom-packages}
+
+You may find that you need to use an external TeX package. A derivation for such a package has to provide the contents of the "texmf" directory in its `"tex"` output, according to the [TeX Directory Structure](https://tug.ctan.org/tds/tds.html). Dependencies on other TeX packages can be listed in the attribute `tlDeps`.
+
+The functions `texlive.combine` and `texlive.withPackages` recognise the following outputs:
+
+- `"out"`: contents are linked in the TeX Live environment, and binaries in the `$out/bin` folder are wrapped;
+- `"tex"`: linked in `$TEXMFDIST`; files should follow the TDS (for instance `$tex/tex/latex/foiltex/foiltex.cls`);
+- `"texdoc"`, `"texsource"`: ignored by default, treated as `"tex"`;
+- `"tlpkg"`: linked in `$TEXMFROOT/tlpkg`;
+- `"man"`, `"info"`, ...: the other outputs are combined into separate outputs.
+
+When using `pkgFilter`, `texlive.combine` will assign the `tlType` values `"bin"`, `"run"`, `"doc"`, `"source"`, and `"tlpkg"`, respectively, to the above outputs.
+
+Here is a (very verbose) example. See also the packages `auctex`, `eukleides`, `mftrace` for more examples.
+
+```nix
+with import <nixpkgs> {};
+
+let
+  foiltex = stdenvNoCC.mkDerivation {
+    pname = "latex-foiltex";
+    version = "2.1.4b";
+
+    outputs = [ "tex" "texdoc" ];
+    passthru.tlDeps = with texlive; [ latex ];
+
+    srcs = [
+      (fetchurl {
+        url = "http://mirrors.ctan.org/macros/latex/contrib/foiltex/foiltex.dtx";
+        hash = "sha256-/2I2xHXpZi0S988uFsGuPV6hhMw8e0U5m/P8myf42R0=";
+      })
+      (fetchurl {
+        url = "http://mirrors.ctan.org/macros/latex/contrib/foiltex/foiltex.ins";
+        hash = "sha256-KTm3pkd+Cpu0nSE2WfsNEa56PeXBaNfx/sOO2Vv0kyc=";
+      })
+    ];
+
+    unpackPhase = ''
+      runHook preUnpack
+
+      for _src in $srcs; do
+        cp "$_src" $(stripHash "$_src")
+      done
+
+      runHook postUnpack
+    '';
+
+    nativeBuildInputs = [
+      (texliveSmall.withPackages (ps: with ps; [ cm-super hypdoc latexmk ]))
+      # multiple-outputs.sh fails if $out is not defined
+      (writeShellScript "force-tex-output.sh" ''
+        out="''${tex-}"
+      '')
+    ];
+
+    dontConfigure = true;
+
+    buildPhase = ''
+      runHook preBuild
+
+      # Generate the style files
+      latex foiltex.ins
+
+      # Generate the documentation
+      export HOME=.
+      latexmk -pdf foiltex.dtx
+
+      runHook postBuild
+    '';
+
+    installPhase = ''
+      runHook preInstall
+
+      path="$tex/tex/latex/foiltex"
+      mkdir -p "$path"
+      cp *.{cls,def,clo,sty} "$path/"
+
+      path="$texdoc/doc/tex/latex/foiltex"
+      mkdir -p "$path"
+      cp *.pdf "$path/"
+
+      runHook postInstall
+    '';
+
+    meta = {
+      description = "A LaTeX2e class for overhead transparencies";
+      license = lib.licenses.unfreeRedistributable;
+      maintainers = with lib.maintainers; [ veprbl ];
+      platforms = lib.platforms.all;
+    };
+  };
+
+  latex_with_foiltex = texliveSmall.withPackages (_: [ foiltex ]);
+in
+  runCommand "test.pdf" {
+    nativeBuildInputs = [ latex_with_foiltex ];
+  } ''
+cat >test.tex <<EOF
+\documentclass{foils}
+
+\title{Presentation title}
+\date{}
+
+\begin{document}
+\maketitle
+\end{document}
+EOF
+  pdflatex test.tex
+  cp test.pdf $out
+''
+```
+
+## LuaLaTeX font cache {#sec-language-texlive-lualatex-font-cache}
+
+The font cache for LuaLaTeX is written to `$HOME`.
+Therefore, it is necessary to set `$HOME` to a writable path, e.g. [before using LuaLaTeX in nix derivations](https://github.com/NixOS/nixpkgs/issues/180639):
+```nix
+runCommandNoCC "lualatex-hello-world" {
+  buildInputs = [ texliveFull ];
+} ''
+  mkdir $out
+  echo '\documentclass{article} \begin{document} Hello world \end{document}' > main.tex
+  env HOME=$(mktemp -d) lualatex  -interaction=nonstopmode -output-format=pdf -output-directory=$out ./main.tex
+''
+```
+
+Additionally, [the cache of a user can diverge from the nix store](https://github.com/NixOS/nixpkgs/issues/278718).
+To resolve font issues that might follow, the cache can be removed by the user:
+```ShellSession
+luaotfload-tool --cache=erase --flush-lookups --force
+```
diff --git a/nixpkgs/doc/languages-frameworks/titanium.section.md b/nixpkgs/doc/languages-frameworks/titanium.section.md
new file mode 100644
index 000000000000..306ad8662767
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/titanium.section.md
@@ -0,0 +1,110 @@
+# Titanium {#titanium}
+
+The Nixpkgs repository contains facilities to deploy a variety of versions of
+the [Titanium SDK](https://www.appcelerator.com), a cross-platform
+mobile app development framework using JavaScript as an implementation language,
+and includes a function abstraction making it possible to build Titanium
+applications for Android and iOS devices from source code.
+
+Not all Titanium features are supported -- currently, it can only be used to build
+Android and iOS apps.
+
+## Building a Titanium app {#building-a-titanium-app}
+
+We can build a Titanium app from source for Android or iOS and for debugging or
+release purposes by invoking the `titaniumenv.buildApp {}` function:
+
+```nix
+titaniumenv.buildApp {
+  name = "myapp";
+  src = ./myappsource;
+
+  preBuild = "";
+  target = "android"; # or 'iphone'
+  tiVersion = "7.1.0.GA";
+  release = true;
+
+  androidsdkArgs = {
+    platformVersions = [ "25" "26" ];
+  };
+  androidKeyStore = ./keystore;
+  androidKeyAlias = "myfirstapp";
+  androidKeyStorePassword = "secret";
+
+  xcodeBaseDir = "/Applications/Xcode.app";
+  xcodewrapperArgs = {
+    version = "9.3";
+  };
+  iosMobileProvisioningProfile = ./myprovisioning.profile;
+  iosCertificateName = "My Company";
+  iosCertificate = ./mycertificate.p12;
+  iosCertificatePassword = "secret";
+  iosVersion = "11.3";
+  iosBuildStore = false;
+
+  enableWirelessDistribution = true;
+  installURL = "/installipa.php";
+}
+```
+
+The `titaniumenv.buildApp {}` function takes the following parameters:
+
+* The `name` parameter refers to the name in the Nix store.
+* The `src` parameter refers to the source code location of the app that needs
+  to be built.
+* `preRebuild` contains optional build instructions that are carried out before
+  the build starts.
+* `target` indicates for which device the app must be built. Currently only
+  'android' and 'iphone' (for iOS) are supported.
+* `tiVersion` can be used to optionally override the requested Titanium version
+  in `tiapp.xml`. If not specified, it will use the version in `tiapp.xml`.
+* `release` should be set to true when building an app for submission to the
+  Google Playstore or Apple Appstore. Otherwise, it should be false.
+
+When the `target` has been set to `android`, we can configure the following
+parameters:
+
+* The `androidSdkArgs` parameter refers to an attribute set that propagates all
+  parameters to the `androidenv.composeAndroidPackages {}` function. This can
+  be used to install all relevant Android plugins that may be needed to perform
+  the Android build. If no parameters are given, it will deploy the platform
+  SDKs for API-levels 25 and 26 by default.
+
+When the `release` parameter has been set to true, you need to provide
+parameters to sign the app:
+
+* `androidKeyStore` is the path to the keystore file
+* `androidKeyAlias` is the key alias
+* `androidKeyStorePassword` refers to the password to open the keystore file.
+
+When the `target` has been set to `iphone`, we can configure the following
+parameters:
+
+* The `xcodeBaseDir` parameter refers to the location where Xcode has been
+  installed. When no value is given, the above value is the default.
+* The `xcodewrapperArgs` parameter passes arbitrary parameters to the
+  `xcodeenv.composeXcodeWrapper {}` function. This can, for example, be used
+  to adjust the default version of Xcode.
+
+When `release` has been set to true, you also need to provide the following
+parameters:
+
+* `iosMobileProvisioningProfile` refers to a mobile provisioning profile needed
+  for signing.
+* `iosCertificateName` refers to the company name in the P12 certificate.
+* `iosCertificate` refers to the path to the P12 file.
+* `iosCertificatePassword` contains the password to open the P12 file.
+* `iosVersion` refers to the iOS SDK version to use. It defaults to the latest
+  version.
+* `iosBuildStore` should be set to `true` when building for the Apple Appstore
+  submission. For enterprise or ad-hoc builds it should be set to `false`.
+
+When `enableWirelessDistribution` has been enabled, you must also provide
+`installURL`, the path of the PHP script included with the iOS build
+environment, to enable wireless ad-hoc installations.
+
+## Emulating or simulating the app {#emulating-or-simulating-the-app}
+
+It is also possible to simulate the corresponding iOS simulator build by using
+`xcodeenv.simulateApp {}` and emulate an Android APK by using
+`androidenv.emulateApp {}`.
diff --git a/nixpkgs/doc/languages-frameworks/vim.section.md b/nixpkgs/doc/languages-frameworks/vim.section.md
new file mode 100644
index 000000000000..69031ccbd340
--- /dev/null
+++ b/nixpkgs/doc/languages-frameworks/vim.section.md
@@ -0,0 +1,275 @@
+# Vim {#vim}
+
+Both Neovim and Vim can be configured to include your favorite plugins
+and additional libraries.
+
+Loading can be deferred; see examples.
+
+At the moment we support two different methods for managing plugins:
+
+- Vim packages (*recommended*)
+- vim-plug (vim only)
+
+Right now two Vim packages are available: `vim`, which has most features that require extra
+dependencies disabled, and `vim-full`, which has them configurable and enabled by default.
+
+::: {.note}
+`vim_configurable` is a deprecated alias for `vim-full` and refers to the fact that its
+build-time features are configurable. It has nothing to do with user configuration,
+and both the `vim` and `vim-full` packages can be customized as explained in the next section.
+:::
+
+## Custom configuration {#custom-configuration}
+
+Adding custom .vimrc lines can be done using the following code:
+
+```nix
+vim-full.customize {
+  # `name` optionally specifies the name of the executable and package
+  name = "vim-with-plugins";
+
+  vimrcConfig.customRC = ''
+    set hidden
+  '';
+}
+```
+
+This configuration is used when Vim is invoked with the command specified as `name`, in this case `vim-with-plugins`.
+You can also omit `name` to customize Vim itself. See the
+[definition of `vimUtils.makeCustomizable`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-utils.nix#L408)
+for all supported options.
+
+For Neovim the `configure` argument can be overridden to achieve the same:
+
+```nix
+neovim.override {
+  configure = {
+    customRC = ''
+      " here your custom configuration goes!
+    '';
+  };
+}
+```
+
+If you want to use `neovim-qt` as a graphical editor, you can configure it by overriding Neovim in an overlay
+or passing it an overridden Neovim:
+
+```nix
+neovim-qt.override {
+  neovim = neovim.override {
+    configure = {
+      customRC = ''
+        " your custom configuration
+      '';
+    };
+  };
+}
+```
+
+## Managing plugins with Vim packages {#managing-plugins-with-vim-packages}
+
+To store your plugins in Vim packages (the native Vim plugin manager, see `:help packages`) the following example can be used:
+
+```nix
+vim-full.customize {
+  vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
+    # loaded on launch
+    start = [ youcompleteme fugitive ];
+    # manually loadable by calling `:packadd $plugin-name`
+    # However, if a Vim plugin has a dependency that is not explicitly listed in
+    # `opt`, that dependency will always be added to `start` to avoid confusion.
+    opt = [ phpCompletion elm-vim ];
+    # To automatically load a plugin when opening a filetype, add vimrc lines like:
+    # autocmd FileType php :packadd phpCompletion
+    # (a complete sketch follows this example)
+  };
+}
+```
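+
+For instance, `customRC` can be combined with an `opt` plugin so that the plugin is
+loaded automatically for a file type, using the autocmd line from the comment above.
+This is only a sketch; the plugin selection is arbitrary:
+
+```nix
+vim-full.customize {
+  vimrcConfig.customRC = ''
+    autocmd FileType php :packadd phpCompletion
+  '';
+  vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
+    start = [ fugitive ];
+    opt = [ phpCompletion ];
+  };
+}
+```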
+
+`myVimPackage` is an arbitrary name for the generated package. You can choose any name you like.
+For Neovim the syntax is:
+
+```nix
+neovim.override {
+  configure = {
+    customRC = ''
+      " here your custom configuration goes!
+    '';
+    packages.myVimPackage = with pkgs.vimPlugins; {
+      # see the examples below for how to use custom packages
+      start = [ ];
+      # If a Vim plugin has a dependency that is not explicitly listed in
+      # `opt`, that dependency will always be added to `start` to avoid confusion.
+      opt = [ ];
+    };
+  };
+}
+```
+
+The resulting package can be added to `packageOverrides` in `~/.nixpkgs/config.nix` to make it installable:
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myVim = vim-full.customize {
+      # `name` specifies the name of the executable and package
+      name = "vim-with-plugins";
+      # add here code from the example section
+    };
+    myNeovim = neovim.override {
+      configure = {
+      # add code from the example section here
+      };
+    };
+  };
+}
+```
+
+After that you can install your special grafted `myVim` or `myNeovim` packages.
+
+### What if your favourite Vim plugin isn’t already packaged? {#what-if-your-favourite-vim-plugin-isnt-already-packaged}
+
+If one of your favourite plugins isn't packaged, you can package it yourself:
+
+```nix
+{ config, pkgs, ... }:
+
+let
+  easygrep = pkgs.vimUtils.buildVimPlugin {
+    name = "vim-easygrep";
+    src = pkgs.fetchFromGitHub {
+      owner = "dkprice";
+      repo = "vim-easygrep";
+      rev = "d0c36a77cc63c22648e792796b1815b44164653a";
+      hash = "sha256-bL33/S+caNmEYGcMLNCanFZyEYUOUmSsedCVBn4tV3g=";
+    };
+  };
+in
+{
+  environment.systemPackages = [
+    (
+      pkgs.neovim.override {
+        configure = {
+          packages.myPlugins = with pkgs.vimPlugins; {
+            start = [
+              vim-go # already packaged plugin
+              easygrep # custom package
+            ];
+            opt = [];
+          };
+          # ...
+        };
+      }
+    )
+  ];
+}
+```
+
+If your package requires building specific parts, use `pkgs.vimUtils.buildVimPlugin` instead.
+
+### Specificities for some plugins {#vim-plugin-specificities}
+#### Treesitter {#vim-plugin-treesitter}
+
+By default `nvim-treesitter` encourages you to download, compile and install
+the required Treesitter grammars at run time with `:TSInstall`. This works
+poorly on NixOS. Instead, to install `nvim-treesitter` with a set of
+precompiled grammars, you can use the `nvim-treesitter.withPlugins` function:
+
+```nix
+(pkgs.neovim.override {
+  configure = {
+    packages.myPlugins = with pkgs.vimPlugins; {
+      start = [
+        (nvim-treesitter.withPlugins (
+          plugins: with plugins; [
+            nix
+            python
+          ]
+        ))
+      ];
+    };
+  };
+})
+```
+
+To enable all grammars packaged in nixpkgs, use `pkgs.vimPlugins.nvim-treesitter.withAllGrammars`.
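+
+For example, the following sketch (reusing the `packages.myPlugins` layout from above)
+bundles every grammar available in nixpkgs:
+
+```nix
+(pkgs.neovim.override {
+  configure = {
+    packages.myPlugins = with pkgs.vimPlugins; {
+      start = [ nvim-treesitter.withAllGrammars ];
+    };
+  };
+})
+```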
+
+## Managing plugins with vim-plug {#managing-plugins-with-vim-plug}
+
+To use [vim-plug](https://github.com/junegunn/vim-plug) to manage your Vim
+plugins the following example can be used:
+
+```nix
+vim-full.customize {
+  vimrcConfig.packages.myVimPackage = with pkgs.vimPlugins; {
+    # loaded on launch
+    plug.plugins = [ youcompleteme fugitive phpCompletion elm-vim ];
+  };
+}
+```
+
+Note: this is no longer possible for Neovim.
+
+
+## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}
+
+Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`nix-shell -p vimPluginsUpdater --run vim-plugins-updater`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/updater.nix). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
+
+After running the updater, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
+
+Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
+
+```nix
+{
+  deoplete-fish = super.deoplete-fish.overrideAttrs(old: {
+    dependencies = with super; [ deoplete-nvim vim-fish ];
+  });
+}
+```
+
+Sometimes plugins require an override that must be changed when the plugin is updated. This can cause issues when Vim plugins are auto-updated but the associated override isn't updated. For these plugins, the override should be written so that it specifies all information required to install the plugin, and running `./update.py` doesn't change the derivation for the plugin. Manually updating the override is required to update these types of plugins. An example of such a plugin is `LanguageClient-neovim`.
+
+To add a new plugin, run `./update.py add "[owner]/[name]"`. **NOTE**: This script automatically commits to your git repository. Be sure to check out a fresh branch before running.
+
+Finally, there are some plugins that are also packaged in nodePackages because they have Javascript-related build steps, such as running webpack. Those plugins are not listed in `vim-plugin-names` or managed by `update.py` at all, and are included separately in `overrides.nix`. Currently, all these plugins are related to the `coc.nvim` ecosystem of the Language Server Protocol integration with Vim/Neovim.
+
+## Updating plugins in nixpkgs {#updating-plugins-in-nixpkgs}
+
+Run the update script with a GitHub API token that has at least `public_repo` access. Running the script without the token is likely to result in rate-limiting (429 errors). For steps on creating an API token, please refer to [GitHub's token documentation](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token).
+
+```sh
+GITHUB_API_TOKEN=my_token ./pkgs/applications/editors/vim/plugins/update.py
+```
+
+Alternatively, set the number of processes to a lower count to avoid rate-limiting.
+
+```sh
+nix-shell -p vimPluginsUpdater --run 'vim-plugins-updater --proc 1'
+```
+
+## How to maintain an out-of-tree overlay of vim plugins? {#vim-out-of-tree-overlays}
+
+You can use the updater script to generate basic packages out of a custom vim
+plugin list:
+
+```sh
+nix-shell -p vimPluginsUpdater --run 'vim-plugins-updater -i vim-plugin-names -o generated.nix --no-commit'
+```
+
+with the contents of `vim-plugin-names` being for example:
+
+```
+repo,branch,alias
+pwntester/octo.nvim,,
+```
+
+You can then reference the generated vim plugins via:
+
+```nix
+{
+  myVimPlugins = pkgs.vimPlugins.extend (
+    (pkgs.callPackage ./generated.nix {})
+  );
+}
+```
+
diff --git a/nixpkgs/doc/lib.md b/nixpkgs/doc/lib.md
new file mode 100644
index 000000000000..2c3105333ed0
--- /dev/null
+++ b/nixpkgs/doc/lib.md
@@ -0,0 +1,6 @@
+# Nixpkgs `lib` {#id-1.4}
+
+```{=include=} chapters
+functions.md
+module-system/module-system.chapter.md
+```
diff --git a/nixpkgs/doc/manpage-urls.json b/nixpkgs/doc/manpage-urls.json
new file mode 100644
index 000000000000..2cc03af4360f
--- /dev/null
+++ b/nixpkgs/doc/manpage-urls.json
@@ -0,0 +1,324 @@
+{
+  "gnunet.conf(5)": "https://docs.gnunet.org/latest/users/configuration.html",
+  "mpd(1)": "https://mpd.readthedocs.io/en/latest/mpd.1.html",
+  "mpd.conf(5)": "https://mpd.readthedocs.io/en/latest/mpd.conf.5.html",
+  "nix.conf(5)": "https://nixos.org/manual/nix/stable/command-ref/conf-file.html",
+
+  "portals.conf(5)": "https://github.com/flatpak/xdg-desktop-portal/blob/1.18.1/doc/portals.conf.rst.in",
+
+  "bootctl(1)": "https://www.freedesktop.org/software/systemd/man/bootctl.html",
+  "busctl(1)": "https://www.freedesktop.org/software/systemd/man/busctl.html",
+  "coredumpctl(1)": "https://www.freedesktop.org/software/systemd/man/coredumpctl.html",
+  "homectl(1)": "https://www.freedesktop.org/software/systemd/man/homectl.html",
+  "hostnamectl(1)": "https://www.freedesktop.org/software/systemd/man/hostnamectl.html",
+  "init(1)": "https://www.freedesktop.org/software/systemd/man/init.html",
+  "journalctl(1)": "https://www.freedesktop.org/software/systemd/man/journalctl.html",
+  "localectl(1)": "https://www.freedesktop.org/software/systemd/man/localectl.html",
+  "loginctl(1)": "https://www.freedesktop.org/software/systemd/man/loginctl.html",
+  "machinectl(1)": "https://www.freedesktop.org/software/systemd/man/machinectl.html",
+  "mount.ddi(1)": "https://www.freedesktop.org/software/systemd/man/mount.ddi.html",
+  "networkctl(1)": "https://www.freedesktop.org/software/systemd/man/networkctl.html",
+  "oomctl(1)": "https://www.freedesktop.org/software/systemd/man/oomctl.html",
+  "portablectl(1)": "https://www.freedesktop.org/software/systemd/man/portablectl.html",
+  "resolvconf(1)": "https://www.freedesktop.org/software/systemd/man/resolvconf.html",
+  "resolvectl(1)": "https://www.freedesktop.org/software/systemd/man/resolvectl.html",
+  "systemctl(1)": "https://www.freedesktop.org/software/systemd/man/systemctl.html",
+  "systemd-ac-power(1)": "https://www.freedesktop.org/software/systemd/man/systemd-ac-power.html",
+  "systemd-analyze(1)": "https://www.freedesktop.org/software/systemd/man/systemd-analyze.html",
+  "systemd-ask-password(1)": "https://www.freedesktop.org/software/systemd/man/systemd-ask-password.html",
+  "systemd-cat(1)": "https://www.freedesktop.org/software/systemd/man/systemd-cat.html",
+  "systemd-cgls(1)": "https://www.freedesktop.org/software/systemd/man/systemd-cgls.html",
+  "systemd-cgtop(1)": "https://www.freedesktop.org/software/systemd/man/systemd-cgtop.html",
+  "systemd-creds(1)": "https://www.freedesktop.org/software/systemd/man/systemd-creds.html",
+  "systemd-cryptenroll(1)": "https://www.freedesktop.org/software/systemd/man/systemd-cryptenroll.html",
+  "systemd-delta(1)": "https://www.freedesktop.org/software/systemd/man/systemd-delta.html",
+  "systemd-detect-virt(1)": "https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html",
+  "systemd-dissect(1)": "https://www.freedesktop.org/software/systemd/man/systemd-dissect.html",
+  "systemd-escape(1)": "https://www.freedesktop.org/software/systemd/man/systemd-escape.html",
+  "systemd-id128(1)": "https://www.freedesktop.org/software/systemd/man/systemd-id128.html",
+  "systemd-inhibit(1)": "https://www.freedesktop.org/software/systemd/man/systemd-inhibit.html",
+  "systemd-machine-id-setup(1)": "https://www.freedesktop.org/software/systemd/man/systemd-machine-id-setup.html",
+  "systemd-measure(1)": "https://www.freedesktop.org/software/systemd/man/systemd-measure.html",
+  "systemd-mount(1)": "https://www.freedesktop.org/software/systemd/man/systemd-mount.html",
+  "systemd-notify(1)": "https://www.freedesktop.org/software/systemd/man/systemd-notify.html",
+  "systemd-nspawn(1)": "https://www.freedesktop.org/software/systemd/man/systemd-nspawn.html",
+  "systemd-path(1)": "https://www.freedesktop.org/software/systemd/man/systemd-path.html",
+  "systemd-run(1)": "https://www.freedesktop.org/software/systemd/man/systemd-run.html",
+  "systemd-socket-activate(1)": "https://www.freedesktop.org/software/systemd/man/systemd-socket-activate.html",
+  "systemd-stdio-bridge(1)": "https://www.freedesktop.org/software/systemd/man/systemd-stdio-bridge.html",
+  "systemd-tty-ask-password-agent(1)": "https://www.freedesktop.org/software/systemd/man/systemd-tty-ask-password-agent.html",
+  "systemd-umount(1)": "https://www.freedesktop.org/software/systemd/man/systemd-umount.html",
+  "systemd(1)": "https://www.freedesktop.org/software/systemd/man/systemd.html",
+  "timedatectl(1)": "https://www.freedesktop.org/software/systemd/man/timedatectl.html",
+  "userdbctl(1)": "https://www.freedesktop.org/software/systemd/man/userdbctl.html",
+  "binfmt.d(5)": "https://www.freedesktop.org/software/systemd/man/binfmt.d.html",
+  "coredump.conf(5)": "https://www.freedesktop.org/software/systemd/man/coredump.conf.html",
+  "coredump.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/coredump.conf.d.html",
+  "crypttab(5)": "https://www.freedesktop.org/software/systemd/man/crypttab.html",
+  "dnssec-trust-anchors.d(5)": "https://www.freedesktop.org/software/systemd/man/dnssec-trust-anchors.d.html",
+  "environment.d(5)": "https://www.freedesktop.org/software/systemd/man/environment.d.html",
+  "extension-release(5)": "https://www.freedesktop.org/software/systemd/man/extension-release.html",
+  "homed.conf(5)": "https://www.freedesktop.org/software/systemd/man/homed.conf.html",
+  "homed.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/homed.conf.d.html",
+  "hostname(5)": "https://www.freedesktop.org/software/systemd/man/hostname.html",
+  "initrd-release(5)": "https://www.freedesktop.org/software/systemd/man/initrd-release.html",
+  "integritytab(5)": "https://www.freedesktop.org/software/systemd/man/integritytab.html",
+  "iocost.conf(5)": "https://www.freedesktop.org/software/systemd/man/iocost.conf.html",
+  "journal-remote.conf(5)": "https://www.freedesktop.org/software/systemd/man/journal-remote.conf.html",
+  "journal-remote.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/journal-remote.conf.d.html",
+  "journal-upload.conf(5)": "https://www.freedesktop.org/software/systemd/man/journal-upload.conf.html",
+  "journal-upload.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/journal-upload.conf.d.html",
+  "journald.conf(5)": "https://www.freedesktop.org/software/systemd/man/journald.conf.html",
+  "journald.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/journald.conf.d.html",
+  "journald@.conf(5)": "https://www.freedesktop.org/software/systemd/man/journald@.conf.html",
+  "loader.conf(5)": "https://www.freedesktop.org/software/systemd/man/loader.conf.html",
+  "locale.conf(5)": "https://www.freedesktop.org/software/systemd/man/locale.conf.html",
+  "localtime(5)": "https://www.freedesktop.org/software/systemd/man/localtime.html",
+  "logind.conf(5)": "https://www.freedesktop.org/software/systemd/man/logind.conf.html",
+  "logind.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/logind.conf.d.html",
+  "machine-id(5)": "https://www.freedesktop.org/software/systemd/man/machine-id.html",
+  "machine-info(5)": "https://www.freedesktop.org/software/systemd/man/machine-info.html",
+  "modules-load.d(5)": "https://www.freedesktop.org/software/systemd/man/modules-load.d.html",
+  "networkd.conf(5)": "https://www.freedesktop.org/software/systemd/man/networkd.conf.html",
+  "networkd.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/networkd.conf.d.html",
+  "oomd.conf(5)": "https://www.freedesktop.org/software/systemd/man/oomd.conf.html",
+  "oomd.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/oomd.conf.d.html",
+  "org.freedesktop.LogControl1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.LogControl1.html",
+  "org.freedesktop.home1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.home1.html",
+  "org.freedesktop.hostname1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.hostname1.html",
+  "org.freedesktop.import1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.import1.html",
+  "org.freedesktop.locale1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.locale1.html",
+  "org.freedesktop.login1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.login1.html",
+  "org.freedesktop.machine1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.machine1.html",
+  "org.freedesktop.network1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.network1.html",
+  "org.freedesktop.oom1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.oom1.html",
+  "org.freedesktop.portable1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.portable1.html",
+  "org.freedesktop.resolve1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.resolve1.html",
+  "org.freedesktop.systemd1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.systemd1.html",
+  "org.freedesktop.timedate1(5)": "https://www.freedesktop.org/software/systemd/man/org.freedesktop.timedate1.html",
+  "os-release(5)": "https://www.freedesktop.org/software/systemd/man/os-release.html",
+  "pstore.conf(5)": "https://www.freedesktop.org/software/systemd/man/pstore.conf.html",
+  "pstore.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/pstore.conf.d.html",
+  "repart.d(5)": "https://www.freedesktop.org/software/systemd/man/repart.d.html",
+  "resolved.conf(5)": "https://www.freedesktop.org/software/systemd/man/resolved.conf.html",
+  "resolved.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/resolved.conf.d.html",
+  "sleep.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/sleep.conf.d.html",
+  "sysctl.d(5)": "https://www.freedesktop.org/software/systemd/man/sysctl.d.html",
+  "system.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/system.conf.d.html",
+  "systemd-sleep.conf(5)": "https://www.freedesktop.org/software/systemd/man/systemd-sleep.conf.html",
+  "systemd-system.conf(5)": "https://www.freedesktop.org/software/systemd/man/systemd-system.conf.html",
+  "systemd-user-runtime-dir(5)": "https://www.freedesktop.org/software/systemd/man/systemd-user-runtime-dir.html",
+  "systemd-user.conf(5)": "https://www.freedesktop.org/software/systemd/man/systemd-user.conf.html",
+  "systemd.automount(5)": "https://www.freedesktop.org/software/systemd/man/systemd.automount.html",
+  "systemd.device(5)": "https://www.freedesktop.org/software/systemd/man/systemd.device.html",
+  "systemd.dnssd(5)": "https://www.freedesktop.org/software/systemd/man/systemd.dnssd.html",
+  "systemd.exec(5)": "https://www.freedesktop.org/software/systemd/man/systemd.exec.html",
+  "systemd.kill(5)": "https://www.freedesktop.org/software/systemd/man/systemd.kill.html",
+  "systemd.link(5)": "https://www.freedesktop.org/software/systemd/man/systemd.link.html",
+  "systemd.mount(5)": "https://www.freedesktop.org/software/systemd/man/systemd.mount.html",
+  "systemd.negative(5)": "https://www.freedesktop.org/software/systemd/man/systemd.negative.html",
+  "systemd.netdev(5)": "https://www.freedesktop.org/software/systemd/man/systemd.netdev.html",
+  "systemd.network(5)": "https://www.freedesktop.org/software/systemd/man/systemd.network.html",
+  "systemd.nspawn(5)": "https://www.freedesktop.org/software/systemd/man/systemd.nspawn.html",
+  "systemd.path(5)": "https://www.freedesktop.org/software/systemd/man/systemd.path.html",
+  "systemd.positive(5)": "https://www.freedesktop.org/software/systemd/man/systemd.positive.html",
+  "systemd.preset(5)": "https://www.freedesktop.org/software/systemd/man/systemd.preset.html",
+  "systemd.resource-control(5)": "https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html",
+  "systemd.scope(5)": "https://www.freedesktop.org/software/systemd/man/systemd.scope.html",
+  "systemd.service(5)": "https://www.freedesktop.org/software/systemd/man/systemd.service.html",
+  "systemd.slice(5)": "https://www.freedesktop.org/software/systemd/man/systemd.slice.html",
+  "systemd.socket(5)": "https://www.freedesktop.org/software/systemd/man/systemd.socket.html",
+  "systemd.swap(5)": "https://www.freedesktop.org/software/systemd/man/systemd.swap.html",
+  "systemd.target(5)": "https://www.freedesktop.org/software/systemd/man/systemd.target.html",
+  "systemd.timer(5)": "https://www.freedesktop.org/software/systemd/man/systemd.timer.html",
+  "systemd.unit(5)": "https://www.freedesktop.org/software/systemd/man/systemd.unit.html",
+  "sysupdate.d(5)": "https://www.freedesktop.org/software/systemd/man/sysupdate.d.html",
+  "sysusers.d(5)": "https://www.freedesktop.org/software/systemd/man/sysusers.d.html",
+  "timesyncd.conf(5)": "https://www.freedesktop.org/software/systemd/man/timesyncd.conf.html",
+  "timesyncd.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/timesyncd.conf.d.html",
+  "tmpfiles.d(5)": "https://www.freedesktop.org/software/systemd/man/tmpfiles.d.html",
+  "udev.conf(5)": "https://www.freedesktop.org/software/systemd/man/udev.conf.html",
+  "user-runtime-dir@.service(5)": "https://www.freedesktop.org/software/systemd/man/user-runtime-dir@.service.html",
+  "user.conf.d(5)": "https://www.freedesktop.org/software/systemd/man/user.conf.d.html",
+  "user@.service(5)": "https://www.freedesktop.org/software/systemd/man/user@.service.html",
+  "vconsole.conf(5)": "https://www.freedesktop.org/software/systemd/man/vconsole.conf.html",
+  "veritytab(5)": "https://www.freedesktop.org/software/systemd/man/veritytab.html",
+  "bootup(7)": "https://www.freedesktop.org/software/systemd/man/bootup.html",
+  "daemon(7)": "https://www.freedesktop.org/software/systemd/man/daemon.html",
+  "file-hierarchy(7)": "https://www.freedesktop.org/software/systemd/man/file-hierarchy.html",
+  "hwdb(7)": "https://www.freedesktop.org/software/systemd/man/hwdb.html",
+  "kernel-command-line(7)": "https://www.freedesktop.org/software/systemd/man/kernel-command-line.html",
+  "linuxaa64.efi.stub(7)": "https://www.freedesktop.org/software/systemd/man/linuxaa64.efi.stub.html",
+  "linuxia32.efi.stub(7)": "https://www.freedesktop.org/software/systemd/man/linuxia32.efi.stub.html",
+  "linuxx64.efi.stub(7)": "https://www.freedesktop.org/software/systemd/man/linuxx64.efi.stub.html",
+  "sd-boot(7)": "https://www.freedesktop.org/software/systemd/man/sd-boot.html",
+  "sd-stub(7)": "https://www.freedesktop.org/software/systemd/man/sd-stub.html",
+  "smbios-type-11(7)": "https://www.freedesktop.org/software/systemd/man/smbios-type-11.html",
+  "systemd-boot(7)": "https://www.freedesktop.org/software/systemd/man/systemd-boot.html",
+  "systemd-stub(7)": "https://www.freedesktop.org/software/systemd/man/systemd-stub.html",
+  "systemd.directives(7)": "https://www.freedesktop.org/software/systemd/man/systemd.directives.html",
+  "systemd.environment-generator(7)": "https://www.freedesktop.org/software/systemd/man/systemd.environment-generator.html",
+  "systemd.generator(7)": "https://www.freedesktop.org/software/systemd/man/systemd.generator.html",
+  "systemd.image-policy(7)": "https://www.freedesktop.org/software/systemd/man/systemd.image-policy.html",
+  "systemd.index(7)": "https://www.freedesktop.org/software/systemd/man/systemd.index.html",
+  "systemd.journal-fields(7)": "https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html",
+  "systemd.net-naming-scheme(7)": "https://www.freedesktop.org/software/systemd/man/systemd.net-naming-scheme.html",
+  "systemd.offline-updates(7)": "https://www.freedesktop.org/software/systemd/man/systemd.offline-updates.html",
+  "systemd.special(7)": "https://www.freedesktop.org/software/systemd/man/systemd.special.html",
+  "systemd.syntax(7)": "https://www.freedesktop.org/software/systemd/man/systemd.syntax.html",
+  "systemd.system-credentials(7)": "https://www.freedesktop.org/software/systemd/man/systemd.system-credentials.html",
+  "systemd.time(7)": "https://www.freedesktop.org/software/systemd/man/systemd.time.html",
+  "udev(7)": "https://www.freedesktop.org/software/systemd/man/udev.html",
+  "30-systemd-environment-d-generator(8)": "https://www.freedesktop.org/software/systemd/man/30-systemd-environment-d-generator.html",
+  "halt(8)": "https://www.freedesktop.org/software/systemd/man/halt.html",
+  "kernel-install(8)": "https://www.freedesktop.org/software/systemd/man/kernel-install.html",
+  "libnss_myhostname.so.2(8)": "https://www.freedesktop.org/software/systemd/man/libnss_myhostname.so.2.html",
+  "libnss_mymachines.so.2(8)": "https://www.freedesktop.org/software/systemd/man/libnss_mymachines.so.2.html",
+  "libnss_resolve.so.2(8)": "https://www.freedesktop.org/software/systemd/man/libnss_resolve.so.2.html",
+  "libnss_systemd.so.2(8)": "https://www.freedesktop.org/software/systemd/man/libnss_systemd.so.2.html",
+  "nss-myhostname(8)": "https://www.freedesktop.org/software/systemd/man/nss-myhostname.html",
+  "nss-mymachines(8)": "https://www.freedesktop.org/software/systemd/man/nss-mymachines.html",
+  "nss-resolve(8)": "https://www.freedesktop.org/software/systemd/man/nss-resolve.html",
+  "nss-systemd(8)": "https://www.freedesktop.org/software/systemd/man/nss-systemd.html",
+  "pam_systemd(8)": "https://www.freedesktop.org/software/systemd/man/pam_systemd.html",
+  "pam_systemd_home(8)": "https://www.freedesktop.org/software/systemd/man/pam_systemd_home.html",
+  "poweroff(8)": "https://www.freedesktop.org/software/systemd/man/poweroff.html",
+  "reboot(8)": "https://www.freedesktop.org/software/systemd/man/reboot.html",
+  "shutdown(8)": "https://www.freedesktop.org/software/systemd/man/shutdown.html",
+  "systemd-ask-password-console.path(8)": "https://www.freedesktop.org/software/systemd/man/systemd-ask-password-console.path.html",
+  "systemd-ask-password-console.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-ask-password-console.service.html",
+  "systemd-ask-password-wall.path(8)": "https://www.freedesktop.org/software/systemd/man/systemd-ask-password-wall.path.html",
+  "systemd-ask-password-wall.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-ask-password-wall.service.html",
+  "systemd-backlight(8)": "https://www.freedesktop.org/software/systemd/man/systemd-backlight.html",
+  "systemd-backlight@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-backlight@.service.html",
+  "systemd-battery-check(8)": "https://www.freedesktop.org/software/systemd/man/systemd-battery-check.html",
+  "systemd-binfmt(8)": "https://www.freedesktop.org/software/systemd/man/systemd-binfmt.html",
+  "systemd-bless-boot-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-bless-boot-generator.html",
+  "systemd-bless-boot(8)": "https://www.freedesktop.org/software/systemd/man/systemd-bless-boot.html",
+  "systemd-boot-check-no-failures(8)": "https://www.freedesktop.org/software/systemd/man/systemd-boot-check-no-failures.html",
+  "systemd-boot-random-seed.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-boot-random-seed.service.html",
+  "systemd-confext(8)": "https://www.freedesktop.org/software/systemd/man/systemd-confext.html",
+  "systemd-confext.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-confext.service.html",
+  "systemd-coredump(8)": "https://www.freedesktop.org/software/systemd/man/systemd-coredump.html",
+  "systemd-coredump.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-coredump.socket.html",
+  "systemd-coredump@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-coredump@.service.html",
+  "systemd-cryptsetup-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-cryptsetup-generator.html",
+  "systemd-cryptsetup(8)": "https://www.freedesktop.org/software/systemd/man/systemd-cryptsetup.html",
+  "systemd-cryptsetup@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-cryptsetup@.service.html",
+  "systemd-debug-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-debug-generator.html",
+  "systemd-environment-d-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-environment-d-generator.html",
+  "systemd-fsck-root.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-fsck-root.service.html",
+  "systemd-fsck-usr.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-fsck-usr.service.html",
+  "systemd-fsck(8)": "https://www.freedesktop.org/software/systemd/man/systemd-fsck.html",
+  "systemd-fsck@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-fsck@.service.html",
+  "systemd-fstab-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-fstab-generator.html",
+  "systemd-getty-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-getty-generator.html",
+  "systemd-gpt-auto-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-gpt-auto-generator.html",
+  "systemd-growfs-root.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-growfs-root.service.html",
+  "systemd-growfs(8)": "https://www.freedesktop.org/software/systemd/man/systemd-growfs.html",
+  "systemd-growfs@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-growfs@.service.html",
+  "systemd-halt.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-halt.service.html",
+  "systemd-hibernate-resume-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hibernate-resume-generator.html",
+  "systemd-hibernate-resume(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hibernate-resume.html",
+  "systemd-hibernate.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hibernate.service.html",
+  "systemd-homed(8)": "https://www.freedesktop.org/software/systemd/man/systemd-homed.html",
+  "systemd-hostnamed(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hostnamed.html",
+  "systemd-hwdb(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hwdb.html",
+  "systemd-hybrid-sleep.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-hybrid-sleep.service.html",
+  "systemd-importd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-importd.html",
+  "systemd-integritysetup-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-integritysetup-generator.html",
+  "systemd-integritysetup(8)": "https://www.freedesktop.org/software/systemd/man/systemd-integritysetup.html",
+  "systemd-integritysetup@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-integritysetup@.service.html",
+  "systemd-journal-gatewayd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journal-gatewayd.html",
+  "systemd-journal-gatewayd.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journal-gatewayd.socket.html",
+  "systemd-journal-remote(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journal-remote.html",
+  "systemd-journal-remote.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journal-remote.socket.html",
+  "systemd-journal-upload(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journal-upload.html",
+  "systemd-journald-audit.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald-audit.socket.html",
+  "systemd-journald-dev-log.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald-dev-log.socket.html",
+  "systemd-journald-varlink@.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald-varlink@.socket.html",
+  "systemd-journald(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald.html",
+  "systemd-journald.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald.socket.html",
+  "systemd-journald@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald@.service.html",
+  "systemd-journald@.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-journald@.socket.html",
+  "systemd-kexec.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-kexec.service.html",
+  "systemd-localed(8)": "https://www.freedesktop.org/software/systemd/man/systemd-localed.html",
+  "systemd-logind(8)": "https://www.freedesktop.org/software/systemd/man/systemd-logind.html",
+  "systemd-machine-id-commit.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-machine-id-commit.service.html",
+  "systemd-machined(8)": "https://www.freedesktop.org/software/systemd/man/systemd-machined.html",
+  "systemd-makefs(8)": "https://www.freedesktop.org/software/systemd/man/systemd-makefs.html",
+  "systemd-makefs@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-makefs@.service.html",
+  "systemd-mkswap@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-mkswap@.service.html",
+  "systemd-modules-load(8)": "https://www.freedesktop.org/software/systemd/man/systemd-modules-load.html",
+  "systemd-network-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-network-generator.html",
+  "systemd-networkd-wait-online(8)": "https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online.html",
+  "systemd-networkd-wait-online@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online@.service.html",
+  "systemd-networkd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-networkd.html",
+  "systemd-oomd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-oomd.html",
+  "systemd-pcrfs-root.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrfs-root.service.html",
+  "systemd-pcrfs@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrfs@.service.html",
+  "systemd-pcrmachine.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrmachine.service.html",
+  "systemd-pcrphase-initrd.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrphase-initrd.service.html",
+  "systemd-pcrphase-sysinit.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrphase-sysinit.service.html",
+  "systemd-pcrphase(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pcrphase.html",
+  "systemd-portabled(8)": "https://www.freedesktop.org/software/systemd/man/systemd-portabled.html",
+  "systemd-poweroff.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-poweroff.service.html",
+  "systemd-pstore(8)": "https://www.freedesktop.org/software/systemd/man/systemd-pstore.html",
+  "systemd-random-seed(8)": "https://www.freedesktop.org/software/systemd/man/systemd-random-seed.html",
+  "systemd-reboot.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-reboot.service.html",
+  "systemd-remount-fs(8)": "https://www.freedesktop.org/software/systemd/man/systemd-remount-fs.html",
+  "systemd-repart(8)": "https://www.freedesktop.org/software/systemd/man/systemd-repart.html",
+  "systemd-repart.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-repart.service.html",
+  "systemd-resolved(8)": "https://www.freedesktop.org/software/systemd/man/systemd-resolved.html",
+  "systemd-rfkill(8)": "https://www.freedesktop.org/software/systemd/man/systemd-rfkill.html",
+  "systemd-rfkill.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-rfkill.socket.html",
+  "systemd-run-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-run-generator.html",
+  "systemd-shutdown(8)": "https://www.freedesktop.org/software/systemd/man/systemd-shutdown.html",
+  "systemd-sleep(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sleep.html",
+  "systemd-socket-proxyd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-socket-proxyd.html",
+  "systemd-soft-reboot.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-soft-reboot.service.html",
+  "systemd-suspend-then-hibernate.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-suspend-then-hibernate.service.html",
+  "systemd-suspend.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-suspend.service.html",
+  "systemd-sysctl(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysctl.html",
+  "systemd-sysext(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysext.html",
+  "systemd-sysext.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysext.service.html",
+  "systemd-system-update-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-system-update-generator.html",
+  "systemd-sysupdate-reboot.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysupdate-reboot.service.html",
+  "systemd-sysupdate-reboot.timer(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysupdate-reboot.timer.html",
+  "systemd-sysupdate(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysupdate.html",
+  "systemd-sysupdate.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysupdate.service.html",
+  "systemd-sysupdate.timer(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysupdate.timer.html",
+  "systemd-sysusers(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysusers.html",
+  "systemd-sysusers.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-sysusers.service.html",
+  "systemd-time-wait-sync(8)": "https://www.freedesktop.org/software/systemd/man/systemd-time-wait-sync.html",
+  "systemd-timedated(8)": "https://www.freedesktop.org/software/systemd/man/systemd-timedated.html",
+  "systemd-timesyncd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-timesyncd.html",
+  "systemd-tmpfiles-clean.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles-clean.service.html",
+  "systemd-tmpfiles-clean.timer(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles-clean.timer.html",
+  "systemd-tmpfiles-setup-dev-early.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles-setup-dev-early.service.html",
+  "systemd-tmpfiles-setup-dev.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles-setup-dev.service.html",
+  "systemd-tmpfiles-setup.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles-setup.service.html",
+  "systemd-tmpfiles(8)": "https://www.freedesktop.org/software/systemd/man/systemd-tmpfiles.html",
+  "systemd-udev-settle.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-udev-settle.service.html",
+  "systemd-udevd-control.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-udevd-control.socket.html",
+  "systemd-udevd-kernel.socket(8)": "https://www.freedesktop.org/software/systemd/man/systemd-udevd-kernel.socket.html",
+  "systemd-udevd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-udevd.html",
+  "systemd-update-done(8)": "https://www.freedesktop.org/software/systemd/man/systemd-update-done.html",
+  "systemd-update-utmp-runlevel.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-update-utmp-runlevel.service.html",
+  "systemd-update-utmp(8)": "https://www.freedesktop.org/software/systemd/man/systemd-update-utmp.html",
+  "systemd-user-sessions(8)": "https://www.freedesktop.org/software/systemd/man/systemd-user-sessions.html",
+  "systemd-userdbd(8)": "https://www.freedesktop.org/software/systemd/man/systemd-userdbd.html",
+  "systemd-vconsole-setup(8)": "https://www.freedesktop.org/software/systemd/man/systemd-vconsole-setup.html",
+  "systemd-veritysetup-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-veritysetup-generator.html",
+  "systemd-veritysetup(8)": "https://www.freedesktop.org/software/systemd/man/systemd-veritysetup.html",
+  "systemd-veritysetup@.service(8)": "https://www.freedesktop.org/software/systemd/man/systemd-veritysetup@.service.html",
+  "systemd-volatile-root(8)": "https://www.freedesktop.org/software/systemd/man/systemd-volatile-root.html",
+  "systemd-xdg-autostart-generator(8)": "https://www.freedesktop.org/software/systemd/man/systemd-xdg-autostart-generator.html",
+  "udevadm(8)": "https://www.freedesktop.org/software/systemd/man/udevadm.html",
+  "passwd(5)": "https://man.archlinux.org/man/passwd.5",
+  "group(5)": "https://man.archlinux.org/man/group.5",
+  "login.defs(5)": "https://man.archlinux.org/man/login.defs.5",
+  "unshare(1)": "https://man.archlinux.org/man/unshare.1.en",
+  "nix-shell(1)": "https://nixos.org/manual/nix/stable/command-ref/nix-shell.html",
+  "mksquashfs(1)": "https://man.archlinux.org/man/extra/squashfs-tools/mksquashfs.1.en"
+}
diff --git a/nixpkgs/doc/manual.md.in b/nixpkgs/doc/manual.md.in
new file mode 100644
index 000000000000..642247e16612
--- /dev/null
+++ b/nixpkgs/doc/manual.md.in
@@ -0,0 +1,15 @@
+# Nixpkgs Reference Manual {#nixpkgs-manual}
+## Version @MANUAL_VERSION@
+
+```{=include=} chapters
+preface.chapter.md
+```
+
+```{=include=} parts
+using-nixpkgs.md
+lib.md
+stdenv.md
+build-helpers.md
+development.md
+contributing.md
+```
diff --git a/nixpkgs/doc/module-system/module-system.chapter.md b/nixpkgs/doc/module-system/module-system.chapter.md
new file mode 100644
index 000000000000..927f66073748
--- /dev/null
+++ b/nixpkgs/doc/module-system/module-system.chapter.md
@@ -0,0 +1,105 @@
+# Module System {#module-system}
+
+## Introduction {#module-system-introduction}
+
+The module system is a language for handling configuration, implemented as a Nix library.
+
+Compared to plain Nix, it adds documentation, type checking and composition or extensibility.
+
+::: {.note}
+This chapter is new and not complete yet. For a gentle introduction to the module system, in the context of NixOS, see [Writing NixOS Modules](https://nixos.org/manual/nixos/unstable/index.html#sec-writing-modules) in the NixOS manual.
+:::
+
+
+## `lib.evalModules` {#module-system-lib-evalModules}
+
+Evaluate a set of modules. This function is typically only used once per application (e.g. once in NixOS, once in Home Manager, ...).
+
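+A minimal sketch (assuming `lib` is the Nixpkgs library; the `greeting` option is made up for illustration):
+
+```nix
+let
+  lib = import <nixpkgs/lib>;
+  eval = lib.evalModules {
+    modules = [
+      # A module declaring an option
+      {
+        options.greeting = lib.mkOption {
+          type = lib.types.str;
+          default = "hello";
+        };
+      }
+      # A module defining a value for that option
+      { config.greeting = "hello, world"; }
+    ];
+  };
+in
+eval.config.greeting  # evaluates to "hello, world"
+```
+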
+### Parameters {#module-system-lib-evalModules-parameters}
+
+#### `modules` {#module-system-lib-evalModules-param-modules}
+
+A list of modules. These are merged together to form the final configuration.
+<!-- TODO link to section about merging, TBD -->
+
+#### `specialArgs` {#module-system-lib-evalModules-param-specialArgs}
+
+An attribute set of module arguments that can be used in `imports`.
+
+This is in contrast to `config._module.args`, which is only available after all `imports` have been resolved.
+
+#### `class` {#module-system-lib-evalModules-param-class}
+
+If the `class` attribute is set and non-`null`, the module system will reject `imports` with a different `_class` declaration.
+
+The `class` value should be a string in lower [camel case](https://en.wikipedia.org/wiki/Camel_case).
+
+If applicable, the `class` should match the "prefix" of the attributes used in (experimental) [flakes](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-flake.html#description). Some examples are:
+
+ - `nixos` as in `flake.nixosModules`
+ - `nixosTest`: modules that constitute a [NixOS VM test](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests)
+<!-- We've only just started with `class`. You're invited to add a few more. -->
+
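+As a sketch (assuming `lib` is the Nixpkgs library; the class name `myApp` is made up):
+
+```nix
+lib.evalModules {
+  class = "myApp";
+  modules = [
+    {
+      # This module declares a matching class; a module with a different
+      # `_class` would be rejected.
+      _class = "myApp";
+      options.example = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+      };
+    }
+  ];
+}
+```
+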
+#### `prefix` {#module-system-lib-evalModules-param-prefix}
+
+A list of strings representing the location at or below which all options are evaluated. This is used by `types.submodule` to improve error reporting and find the implicit `name` module argument.
+
+### Return value {#module-system-lib-evalModules-return-value}
+
+The result is an attribute set with the following attributes:
+
+#### `options` {#module-system-lib-evalModules-return-value-options}
+
+The nested attribute set of all option declarations.
+
+#### `config` {#module-system-lib-evalModules-return-value-config}
+
+The nested attribute set of all option values.
+
+#### `type` {#module-system-lib-evalModules-return-value-type}
+
+A module system type. This type is an instance of `types.submoduleWith` containing the current [`modules`](#module-system-lib-evalModules-param-modules).
+
+The option definitions that are typed with this type will extend the current set of modules, like [`extendModules`](#module-system-lib-evalModules-return-value-extendModules).
+
+However, the value returned from the type is just the [`config`](#module-system-lib-evalModules-return-value-config), like any submodule.
+
+If you're familiar with prototype inheritance, you can think of this `evalModules` invocation as the prototype, and usages of this type as the instances.
+
+This type is also available to the [`modules`](#module-system-lib-evalModules-param-modules) as the module argument `moduleType`.
+<!-- TODO: document the module arguments. Using moduleType is like saying: suppose this configuration was extended. -->
+
+#### `extendModules` {#module-system-lib-evalModules-return-value-extendModules}
+
+A function similar to `evalModules` but building on top of the already passed [`modules`](#module-system-lib-evalModules-param-modules). Its arguments, `modules` and `specialArgs` are added to the existing values.
+
+If you're familiar with prototype inheritance, you can think of the current, actual `evalModules` invocation as the prototype, and the return value of `extendModules` as the instance.
+
+This functionality is also available to modules as the `extendModules` module argument.
+
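+A sketch, building on the hypothetical `greeting` option from the `evalModules` example above:
+
+```nix
+let
+  lib = import <nixpkgs/lib>;
+  base = lib.evalModules {
+    modules = [
+      {
+        options.greeting = lib.mkOption {
+          type = lib.types.str;
+          default = "hello";
+        };
+      }
+    ];
+  };
+  # Extend the already evaluated configuration with an extra module.
+  extended = base.extendModules {
+    modules = [ { config.greeting = "hello, extended world"; } ];
+  };
+in
+extended.config.greeting  # evaluates to "hello, extended world"
+```
+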
+::: {.note}
+
+**Evaluation Performance**
+
+`extendModules` returns a configuration that shares very little with the original `evalModules` invocation, because the module arguments may be different.
+
+So if you have a configuration that has been (or will be) largely evaluated, almost none of the computation is shared with the configuration returned by `extendModules`.
+
+The real work of module evaluation happens while computing the values in `config` and `options`, so multiple invocations of `extendModules` have a particularly small cost, as long as only the final `config` and `options` are evaluated.
+
+If you do reference multiple `config` (or `options`) from before and after `extendModules`, evaluation performance is the same as with multiple `evalModules` invocations, because the new modules' ability to override existing configuration fundamentally requires constructing a new `config` and `options` fixpoint.
+:::
+
+#### `_module` {#module-system-lib-evalModules-return-value-_module}
+
+A portion of the configuration tree which is elided from `config`.
+
+<!-- TODO: when markdown migration is complete, make _module docs visible again and reference _module docs. Maybe move those docs into this chapter? -->
+
+#### `_type` {#module-system-lib-evalModules-return-value-_type}
+
+A nominal type marker, always `"configuration"`.
+
+#### `class` {#module-system-lib-evalModules-return-value-_configurationClass}
+
+The [`class` argument](#module-system-lib-evalModules-param-class).
diff --git a/nixpkgs/doc/old/cross.txt b/nixpkgs/doc/old/cross.txt
new file mode 100644
index 000000000000..0f958e772b78
--- /dev/null
+++ b/nixpkgs/doc/old/cross.txt
@@ -0,0 +1,329 @@
+Setting up a cross compiler with Nix
+
+"Cross compilation" means compiling a program on one machine for another
+type of machine. A typical use of cross compilation is to compile programs
+for embedded devices. These devices often don't have the computing power
+and memory to compile programs natively.
+
+For a fully working cross compiler the following are needed:
+
+* cross binutils: assembler, archiver, linker, etcetera that understand
+the format of the target system
+
+* cross compiler: a compiler that can generate binary code and object files
+for the target platform
+
+* cross C library: a library to link object files with to create fully
+functional programs
+
+Cross compilers are difficult to set up. A lot of people report that they
+cannot succeed in building a cross toolchain. The answers
+usually consist of "download this pre-built toolchain", which is equally
+unhelpful.
+
+A toolchain is set up in five steps:
+
+1. build binutils that can run on the host platform, but generate code
+for the target platform
+
+2. build Linux kernel headers for the target platform
+
+3. build a minimal C only version of GCC, that can run on the host platform
+and generate code for the target platform
+
+4. build a C library for the target platform. This includes the dynamic
+linker, C library, etc.
+
+5. build a full GCC
+
+****
+NB:
+
+Keep in mind that many programs are not very well suited for cross
+compilation. Either they are not intended to run on other platforms,
+because the code is highly platform specific, or the configuration process
+is not written with cross compilation in mind.
+
+Nix will not solve these problems for you!
+***
+
+This document describes how to set up a cross compiler that generates code for
+arm-linux with uClibc and runs on i686-linux. The "stdenv" used is the
+default from the standard Nix packages collection.
+
+Step 1: build binutils for arm-linux in the stdenv for i686-linux
+
+---
+{stdenv, fetchurl, noSysDirs}:
+
+stdenv.mkDerivation {
+  name = "binutils-2.16.1-arm";
+  builder = ./builder.sh;
+  src = fetchurl {
+    url = "http://ftp.nluug.nl/gnu/binutils/binutils-2.16.1.tar.bz2";
+    hash = "sha256-14pv+YKrL3NyFwbnv9MoWsZHgEZk5+pHhuZtAfkcVsU=";
+  };
+  inherit noSysDirs;
+  configureFlags = [ "--target=arm-linux" ];
+}
+---
+
+This will compile binutils that will run on i686-linux, but knows the
+format used by arm-linux.
+
+Step 2: build kernel headers for the target architecture
+
+  default.nix for kernel-headers-arm:
+
+---
+{stdenv, fetchurl}:
+
+assert stdenv.buildPlatform.system == "i686-linux";
+
+stdenv.mkDerivation {
+  name = "linux-headers-2.6.13.1-arm";
+  builder = ./builder.sh;
+  src = fetchurl {
+    url = "http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.13.1.tar.bz2";
+    hash = "sha256-qtICDjfiA1HxWBrHqtB5DCv9s9/HyznKV1C6IxCrHYs=";
+  };
+}
+---
+
+  builder.sh for kernel-headers-arm:
+
+---
+source $stdenv/setup
+
+
+buildPhase() {
+    make include/linux/version.h
+}
+
+buildPhase=buildPhase
+
+
+installPhase() {
+    mkdir $out
+    mkdir $out/include
+    #cd $out/include
+    #ln -s asm-arm asm
+    make include/asm ARCH=arm
+    cp -prvd include/linux include/asm include/asm-arm include/asm-generic $out/include
+    echo -n > $out/include/linux/autoconf.h
+}
+
+installPhase=installPhase
+
+
+genericBuild
+---
+
+Step 3: build a minimal GCC
+
+Extra/different parameters include the target platform and the kernel
+headers argument (this needs a major cleanup, as well as the name, it
+needs to be different!). Profiled compilers are disabled. The tarball
+used here is just gcc-core. For some reason it doesn't install nicely
+if the whole tarball is used (or is this some braino on my side? -- AH).
+
+Only C is used, because for other languages (such as C++) extra libraries
+need to be compiled, for which libraries compiled for the target system
+are needed.
+
+There is a bit of evilness going on. The cross compiled utilities need
+to be either copied to or be linked from the output tree of the compiler.
+(Is this really true? Back this up with arguments! -- AH)
+
+Symbolic links are not something we want inside the Nix store.
+
+---
+{ stdenv, fetchurl, noSysDirs
+, langC ? true, langCC ? true, langF77 ? false
+, profiledCompiler ? false
+, binutilsArm
+, kernelHeadersArm
+}:
+
+assert langC;
+
+stdenv.mkDerivation {
+  name = "gcc-4.0.2-arm";
+  builder = ./builder.sh;
+  src = fetchurl {
+    url = "ftp://ftp.nluug.nl/pub/gnu/gcc/gcc-4.0.2/gcc-core-4.0.2.tar.bz2";
+    hash = "sha256-LANmXRS7/fN2zF5JUJVd8OjNA5aCDsGLQKhSpxWA3Qk=";
+  };
+  # !!! apply only if noSysDirs is set
+  patches = [./no-sys-dirs.patch ./gcc-inhibit.patch];
+  inherit noSysDirs langC langCC langF77 profiledCompiler;
+  buildInputs = [binutilsArm];
+  inherit kernelHeadersArm binutilsArm;
+  platform = "arm-linux";
+}
+---
+
+The builder.sh for a cross-compiler. Note that the binutils are prefixed
+with the architecture name, so arm-linux-ld instead of ld, etc. This is
+necessary because, when cross-compiling, a lot of programs look for these
+tools under these specific names. The standard gcc-wrapper does not take this
+into account yet.
+
+---
+source $stdenv/setup
+
+
+export NIX_FIXINC_DUMMY=$NIX_BUILD_TOP/dummy
+mkdir $NIX_FIXINC_DUMMY
+
+
+if test "$noSysDirs" = "1"; then
+
+    if test "$noSysDirs" = "1"; then
+        # Figure out what extra flags to pass to the gcc compilers
+        # being generated to make sure that they use our glibc.
+        if test -e $NIX_CC/nix-support/orig-glibc; then
+            glibc=$(cat $NIX_CC/nix-support/orig-glibc)
+            # Ugh.  Copied from gcc-wrapper/builder.sh.  We can't just
+            # source in $NIX_CC/nix-support/add-flags, since that
+            # would cause *this* GCC to be linked against the
+            # *previous* GCC.  Need some more modularity there.
+            extraCFlags="-B$glibc/lib -isystem $glibc/include"
+            extraLDFlags="-B$glibc/lib -L$glibc/lib -Wl,-s \
+              -Wl,-dynamic-linker,$glibc/lib/ld-linux.so.2"
+
+            # Oh, what a hack.  I should be shot for this.
+            # In stage 1, we should link against the previous GCC, but
+            # not afterwards.  Otherwise we retain a dependency.
+            # However, ld-wrapper, which adds the linker flags for the
+            # previous GCC, is also used in stage 2/3.  We can prevent
+            # it from adding them by NIX_GLIBC_FLAGS_SET, but then
+            # gcc-wrapper will also not add them, thereby causing
+            # stage 1 to fail.  So we use a trick to only set the
+            # flags in gcc-wrapper.
+            hook=$(pwd)/ld-wrapper-hook
+            echo "NIX_GLIBC_FLAGS_SET=1" > $hook
+            export NIX_LD_WRAPPER_START_HOOK=$hook
+        fi
+
+        export NIX_EXTRA_CFLAGS=$extraCFlags
+        export NIX_EXTRA_LDFLAGS=$extraLDFlags
+        export CFLAGS=$extraCFlags
+        export CXXFLAGS=$extraCFlags
+        export LDFLAGS=$extraLDFlags
+    fi
+
+else
+    patches=""
+fi
+
+
+preConfigure=preConfigure
+preConfigure() {
+
+    # Determine the frontends to build.
+    langs="c"
+    if test -n "$langCC"; then
+        langs="$langs,c++"
+    fi
+    if test -n "$langF77"; then
+        langs="$langs,f77"
+    fi
+
+    # Cross compiler evilness
+    mkdir -p $out
+    mkdir -p $out/arm-linux
+    mkdir -p $out/arm-linux/bin
+    ln -s $binutilsArm/arm-linux/bin/as $out/arm-linux/bin/as
+    ln -s $binutilsArm/arm-linux/bin/ld $out/arm-linux/bin/ld
+    ln -s $binutilsArm/arm-linux/bin/ar $out/arm-linux/bin/ar
+    ln -s $binutilsArm/arm-linux/bin/ranlib $out/arm-linux/bin/ranlib
+
+    # Perform the build in a different directory.
+    mkdir ../build
+    cd ../build
+
+    configureScript=../$sourceRoot/configure
+    configureFlags="--enable-languages=$langs --target=$platform --disable-threads --disable-libmudflap --disable-shared --with-headers=$kernelHeadersArm/include --disable-multilib"
+}
+
+
+postInstall=postInstall
+postInstall() {
+    # Remove precompiled headers for now.  They are very big and
+    # probably not very useful yet.
+    find $out/include -name "*.gch" -exec rm -rf {} \; -prune
+
+    # Remove `fixincl' to prevent a retained dependency on the
+    # previous gcc.
+    rm -rf $out/libexec/gcc/*/*/install-tools
+}
+
+
+#if test -z "$profiledCompiler"; then
+    #makeFlags="bootstrap"
+#else
+    #makeFlags="profiledbootstrap"
+#fi
+
+genericBuild
+---
+
+Step 4: build a C library for the target platform.
+
+The previous steps are enough to compile a C library. In our case we take
+uClibc. It's intended to be a small sized replacement for glibc. It is widely
+used in embedded environments.
+
+...
+
+Step 5: Build a compiler to link with the newly built C library.
+
+...
+
+If we restrict the compiler to just C programs it is relatively easy,
+since we only need to wrap the GCC we built in the previous step with all
+the right tools and the right C library. Programs that have been compiled
+with this compiler and verified to work on an HP Jornada 820 running Linux
+include "patch", "make" and "wget".
+
+If we want to build C++ programs it gets a lot more difficult. GCC has a
+three-step compilation process. In the first step a simple compiler, called
+xgcc, that can only compile C programs is built. With that compiler GCC
+compiles itself two more times: once to build a full compiler, and once more
+to rebuild the full compiler with the compiler freshly built in the second
+step. Support for C++, if configured, is compiled in during the second and
+third steps.
+
+One of the libraries that has to be built for C++ support is libstdc++.
+This library is built with xgcc, even when cross compiling, since libstdc++
+has to be compiled for arm-linux.
+
+One of the compiler flag variables that GCC uses for this compiler is X_CFLAGS.
+The Nix build process uses it to set the dynamic linker, which is the one from
+glibc in the case of i686-linux with the default Nix packages collection.
+
+Obviously, since we need to compile libstdc++ for arm-linux with uClibc,
+linking will not be done correctly: you can't link object files built for
+arm-linux with a glibc built for i686-linux.
+
+Setting X_CFLAGS to use the uClibc libraries and dynamic linker will fail
+too. Earlier in the build process these flags are used to compile important
+files like libgcc.a with the host system gcc, which does need to be linked
+against glibc. Making this work correctly requires careful juggling of
+compilation flags. This is still work in progress for Nix.
+
+
+---
+
+After successfully completing the whole toolchain you can start building
+packages with the newly built tools. To make everything build correctly
+you will need a stdenv for your target platform. Setting up this platform
+will take some effort. Right now there is a very experimental setup for
+arm-linux, which needs to be cleaned up before it is production ready.
+
+Please note that many packages are not well suited for cross-compilation.
+Even though the package itself might be perfectly portable, the build
+scripts often are not. One thing that we have seen cause frequent
+build failures is the use of the LD variable. This is often set to 'ld'
+and not $(CROSS)-ld.
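+
+For example, a package whose Makefile hard-codes LD can often be built by
+passing the cross tools explicitly. A rough sketch (the attribute values are
+only illustrative):
+
+---
+stdenv.mkDerivation {
+  name = "example-0.1-arm";
+  src = ./example;
+  makeFlags = "LD=arm-linux-ld CC=arm-linux-gcc";
+}
+---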
diff --git a/nixpkgs/doc/packages/cataclysm-dda.section.md b/nixpkgs/doc/packages/cataclysm-dda.section.md
new file mode 100644
index 000000000000..f401e9b9efa5
--- /dev/null
+++ b/nixpkgs/doc/packages/cataclysm-dda.section.md
@@ -0,0 +1,129 @@
+# Cataclysm: Dark Days Ahead {#cataclysm-dark-days-ahead}
+
+## How to install Cataclysm DDA {#how-to-install-cataclysm-dda}
+
+To install the latest stable release of Cataclysm DDA to your profile, execute
+`nix-env -f "<nixpkgs>" -iA cataclysm-dda`. For the curses build (build
+without tiles), install `cataclysmDDA.stable.curses`. Note: `cataclysm-dda` is
+an alias to `cataclysmDDA.stable.tiles`.
+
+If you would like a development build of your favorite git revision,
+override `cataclysm-dda-git` (or `cataclysmDDA.git.curses` if you prefer the
+curses build):
+
+```nix
+cataclysm-dda-git.override {
+  version = "YYYY-MM-DD";
+  rev = "YOUR_FAVORITE_REVISION";
+  sha256 = "CHECKSUM_OF_THE_REVISION";
+}
+```
+
+The sha256 checksum can be obtained by running:
+
+```sh
+nix-prefetch-url --unpack "https://github.com/CleverRaven/Cataclysm-DDA/archive/${YOUR_FAVORITE_REVISION}.tar.gz"
+```
+
+The default configuration directory is `~/.cataclysm-dda`. If you prefer
+`$XDG_CONFIG_HOME/cataclysm-dda`, override the derivation:
+
+```nix
+cataclysm-dda.override {
+  useXdgDir = true;
+}
+```
+
+## Important note for overriding packages {#important-note-for-overriding-packages}
+
+After applying `overrideAttrs`, you need to fix `passthru.pkgs` and
+`passthru.withMods` attributes either manually or by using `attachPkgs`:
+
+```nix
+let
+  # You enabled parallel building.
+  myCDDA = cataclysm-dda-git.overrideAttrs (_: {
+    enableParallelBuilding = true;
+  });
+
+  # Unfortunately, this refers to the package before overriding and
+  # parallel building is still disabled.
+  badExample = myCDDA.withMods (_: []);
+
+  inherit (cataclysmDDA) attachPkgs pkgs wrapCDDA;
+
+  # You can fix it by hand
+  goodExample1 = myCDDA.overrideAttrs (old: {
+    passthru = old.passthru // {
+      pkgs = pkgs.override { build = goodExample1; };
+      withMods = wrapCDDA goodExample1;
+    };
+  });
+
+  # or by using a helper function `attachPkgs`.
+  goodExample2 = attachPkgs pkgs myCDDA;
+in
+
+# badExample                     # parallel building disabled
+# goodExample1.withMods (_: [])  # parallel building enabled
+goodExample2.withMods (_: [])    # parallel building enabled
+```
+
+## Customizing with mods {#customizing-with-mods}
+
+To install Cataclysm DDA with mods of your choice, you can use the `withMods`
+attribute:
+
+```nix
+cataclysm-dda.withMods (mods: with mods; [
+  tileset.UndeadPeople
+])
+```
+
+All mods, soundpacks, and tilesets available in nixpkgs are found in
+`cataclysmDDA.pkgs`.
+
+Here is an example to modify existing mods and/or add more mods not available
+in nixpkgs:
+
+```nix
+let
+  customMods = self: super: lib.recursiveUpdate super {
+    # Modify existing mod
+    tileset.UndeadPeople = super.tileset.UndeadPeople.overrideAttrs (old: {
+      # If you like to apply a patch to the tileset for example
+      patches = [ ./path/to/your.patch ];
+    });
+
+    # Add another mod
+    mod.Awesome = cataclysmDDA.buildMod {
+      modName = "Awesome";
+      version = "0.x";
+      src = fetchFromGitHub {
+        owner = "Someone";
+        repo = "AwesomeMod";
+        rev = "...";
+        hash = "...";
+      };
+      # Path to be installed in the unpacked source (default: ".")
+      modRoot = "contents/under/this/path/will/be/installed";
+    };
+
+    # Add another soundpack
+    soundpack.Fantastic = cataclysmDDA.buildSoundPack {
+      # ditto
+    };
+
+    # Add another tileset
+    tileset.SuperDuper = cataclysmDDA.buildTileSet {
+      # ditto
+    };
+  };
+in
+cataclysm-dda.withMods (mods: with mods.extend customMods; [
+  tileset.UndeadPeople
+  mod.Awesome
+  soundpack.Fantastic
+  tileset.SuperDuper
+])
+```
diff --git a/nixpkgs/doc/packages/citrix.section.md b/nixpkgs/doc/packages/citrix.section.md
new file mode 100644
index 000000000000..bcf0924249bc
--- /dev/null
+++ b/nixpkgs/doc/packages/citrix.section.md
@@ -0,0 +1,32 @@
+# Citrix Workspace {#sec-citrix}
+
+The [Citrix Workspace App](https://www.citrix.com/products/workspace-app/) is a remote desktop viewer which provides access to [XenDesktop](https://www.citrix.com/products/xenapp-xendesktop/) installations.
+
+## Basic usage {#sec-citrix-base}
+
+The tarball archive needs to be downloaded manually, as the vendor's license agreement for [Citrix Workspace](https://www.citrix.com/downloads/workspace-app/linux/workspace-app-for-linux-latest.html) needs to be accepted first. Then run `nix-prefetch-url file://$PWD/linuxx64-$version.tar.gz`. With the archive available in the store, the package can be built and installed with Nix.
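+
+For example (a sketch; substitute the version of the tarball you actually downloaded, and remember that the package is unfree):
+
+```ShellSession
+$ nix-prefetch-url file://$PWD/linuxx64-23.11.0.85.tar.gz
+$ NIXPKGS_ALLOW_UNFREE=1 nix-env -f '<nixpkgs>' -iA citrix_workspace
+```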
+
+## Citrix Self-service {#sec-citrix-selfservice}
+
+The [self-service](https://support.citrix.com/article/CTX200337) is an application for managing Citrix desktops and applications. Please note that this feature only works with citrix_workspace_20_06_0 and later versions.
+
+In order to set this up, you first have to [download the `.cr` file from the Netscaler Gateway](https://its.uiowa.edu/support/article/102186). After that, you can configure the `selfservice` like this:
+
+```ShellSession
+$ storebrowse -C ~/Downloads/receiverconfig.cr
+$ selfservice
+```
+
+## Custom certificates {#sec-citrix-custom-certs}
+
+The `Citrix Workspace App` in `nixpkgs` trusts several certificates [from the Mozilla database](https://curl.haxx.se/docs/caextract.html) by default. However, several companies using Citrix might require their own corporate certificate. On distros with imperative packaging, these certs can easily be stored in [`$ICAROOT`](https://citrix.github.io/receiver-for-linux-command-reference/); in `nixpkgs`, however, this directory is a store path. In order to work around this issue, the package provides a simple mechanism, based on `symlinkJoin`, to add custom certificates without rebuilding the entire package:
+
+```nix
+with import <nixpkgs> { config.allowUnfree = true; };
+let
+  extraCerts = [
+    ./custom-cert-1.pem
+    ./custom-cert-2.pem # ...
+  ];
+in citrix_workspace.override { inherit extraCerts; }
+```
diff --git a/nixpkgs/doc/packages/darwin-builder.section.md b/nixpkgs/doc/packages/darwin-builder.section.md
new file mode 100644
index 000000000000..ca8519c5bf5f
--- /dev/null
+++ b/nixpkgs/doc/packages/darwin-builder.section.md
@@ -0,0 +1,190 @@
+# darwin.linux-builder {#sec-darwin-builder}
+
+:::{.warning}
+By default, `darwin.linux-builder` uses a publicly-known private SSH **host key** (this is different from the SSH key used by the user that connects to the builder).
+
+Given the intended use case for it (a Linux builder that runs **on the same machine**), this shouldn't be an issue.
+However, if you plan to deviate from this use case in any way (e.g. by exposing this builder to remote machines), you should understand the security implications of doing so and take any appropriate measures.
+:::
+
+`darwin.linux-builder` provides a way to bootstrap a Linux remote builder on a macOS machine.
+
+This requires macOS version 12.4 or later.
+
+The remote builder runs on host port 31022 by default.
+You can change it by overriding `virtualisation.darwin-builder.hostPort`.
+See the [example](#sec-darwin-builder-example-flake).
+
+You will also need to be a trusted user for your Nix installation.  In other
+words, your `/etc/nix/nix.conf` should have something like:
+
+```
+extra-trusted-users = <your username goes here>
+```
+
+To launch the remote builder, run the following flake:
+
+```ShellSession
+$ nix run nixpkgs#darwin.linux-builder
+```
+
+That will prompt you to enter your `sudo` password:
+
+```
++ sudo --reset-timestamp /nix/store/…-install-credentials.sh ./keys
+Password:
+```
+
+… so that it can install a private key used to `ssh` into the build server.
+After that the script will launch the virtual machine and automatically log you
+in as the `builder` user:
+
+```
+<<< Welcome to NixOS 22.11.20220901.1bd8d11 (aarch64) - ttyAMA0 >>>
+
+Run 'nixos-help' for the NixOS manual.
+
+nixos login: builder (automatic login)
+
+
+[builder@nixos:~]$
+```
+
+> Note: When you need to stop the VM, run `shutdown now` as the `builder` user.
+
+To delegate builds to the remote builder, add the following options to your
+`nix.conf` file:
+
+```
+# - Replace ${ARCH} with either aarch64 or x86_64 to match your host machine
+# - Replace ${MAX_JOBS} with the maximum number of builds (pick 4 if you're not sure)
+builders = ssh-ng://builder@linux-builder ${ARCH}-linux /etc/nix/builder_ed25519 ${MAX_JOBS} - - - c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUpCV2N4Yi9CbGFxdDFhdU90RStGOFFVV3JVb3RpQzVxQkorVXVFV2RWQ2Igcm9vdEBuaXhvcwo=
+
+# Not strictly necessary, but this will reduce your disk utilization
+builders-use-substitutes = true
+```
+
+To allow Nix to connect to a remote builder not running on port 22, you will also need to create a new file at `/etc/ssh/ssh_config.d/100-linux-builder.conf`:
+
+```
+Host linux-builder
+  Hostname localhost
+  HostKeyAlias linux-builder
+  Port 31022
+```
+
+… and then restart your Nix daemon to apply the change:
+
+```ShellSession
+$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
+```
+
+## Example flake usage {#sec-darwin-builder-example-flake}
+
+```nix
+{
+  inputs = {
+    nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-22.11-darwin";
+    darwin.url = "github:lnl7/nix-darwin/master";
+    darwin.inputs.nixpkgs.follows = "nixpkgs";
+  };
+
+  outputs = { self, darwin, nixpkgs, ... }@inputs:
+  let
+
+    inherit (darwin.lib) darwinSystem;
+    system = "aarch64-darwin";
+    pkgs = nixpkgs.legacyPackages."${system}";
+    linuxSystem = builtins.replaceStrings [ "darwin" ] [ "linux" ] system;
+
+    darwin-builder = nixpkgs.lib.nixosSystem {
+      system = linuxSystem;
+      modules = [
+        "${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
+        { virtualisation = {
+            host.pkgs = pkgs;
+            darwin-builder.workingDirectory = "/var/lib/darwin-builder";
+            darwin-builder.hostPort = 22;
+          };
+        }
+      ];
+    };
+  in {
+
+    darwinConfigurations = {
+      machine1 = darwinSystem {
+        inherit system;
+        modules = [
+          {
+            nix.distributedBuilds = true;
+            nix.buildMachines = [{
+              hostName = "localhost";
+              sshUser = "builder";
+              sshKey = "/etc/nix/builder_ed25519";
+              system = linuxSystem;
+              maxJobs = 4;
+              supportedFeatures = [ "kvm" "benchmark" "big-parallel" ];
+            }];
+
+            launchd.daemons.darwin-builder = {
+              command = "${darwin-builder.config.system.build.macos-builder-installer}/bin/create-builder";
+              serviceConfig = {
+                KeepAlive = true;
+                RunAtLoad = true;
+                StandardOutPath = "/var/log/darwin-builder.log";
+                StandardErrorPath = "/var/log/darwin-builder.log";
+              };
+            };
+          }
+        ];
+      };
+    };
+
+  };
+}
+```
+
+## Reconfiguring the remote builder {#sec-darwin-builder-reconfiguring}
+
+Initially you should not change the remote builder configuration, or else you will not be
+able to use the binary cache. However, once you have the remote builder running locally,
+you may use it to build a modified remote builder with additional storage or memory.
+
+To do this, you just need to set the `virtualisation.darwin-builder.*` parameters as
+in the example below and rebuild.
+
+```nix
+  {
+    darwin-builder = nixpkgs.lib.nixosSystem {
+      system = linuxSystem;
+      modules = [
+        "${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
+        {
+          virtualisation.host.pkgs = pkgs;
+          virtualisation.darwin-builder.diskSize = 5120;
+          virtualisation.darwin-builder.memorySize = 1024;
+          virtualisation.darwin-builder.hostPort = 33022;
+          virtualisation.darwin-builder.workingDirectory = "/var/lib/darwin-builder";
+        }
+      ];
+    };
+  }
+```
+
+You may make any other changes to your VM in this attribute set. For example,
+you could enable Docker or X11 forwarding to your Darwin host.
+
+## Troubleshooting the generated configuration {#sec-darwin-builder-troubleshoot}
+
+The `linux-builder` package exposes the attributes `nixosConfig` and `nixosOptions` that allow you to inspect the generated NixOS configuration in the `nix repl`. For example:
+
+```
+$ nix repl --file ~/src/nixpkgs --argstr system aarch64-darwin
+
+nix-repl> darwin.linux-builder.nixosConfig.nix.package
+«derivation /nix/store/...-nix-2.17.0.drv»
+
+nix-repl> :p darwin.linux-builder.nixosOptions.virtualisation.memorySize.definitionsWithLocations
+[ { file = "/home/user/src/nixpkgs/nixos/modules/profiles/macos-builder.nix"; value = 3072; } ]
+
+```
diff --git a/nixpkgs/doc/packages/dlib.section.md b/nixpkgs/doc/packages/dlib.section.md
new file mode 100644
index 000000000000..bd5b1a20a4d4
--- /dev/null
+++ b/nixpkgs/doc/packages/dlib.section.md
@@ -0,0 +1,13 @@
+# DLib {#dlib}
+
+[DLib](http://dlib.net/) is a modern, C++\-based toolkit which provides several machine learning algorithms.
+
+## Compiling without AVX support {#compiling-without-avx-support}
+
+Older CPUs in particular don't support the [AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) (Advanced Vector Extensions) instructions that DLib uses to optimize its algorithms.
+
+On the affected hardware errors like `Illegal instruction` will occur. In those cases AVX support needs to be disabled:
+
+```nix
+self: super: { dlib = super.dlib.override { avxSupport = false; }; }
+```
diff --git a/nixpkgs/doc/packages/eclipse.section.md b/nixpkgs/doc/packages/eclipse.section.md
new file mode 100644
index 000000000000..acf34b57571a
--- /dev/null
+++ b/nixpkgs/doc/packages/eclipse.section.md
@@ -0,0 +1,68 @@
+# Eclipse {#sec-eclipse}
+
+The Nix expressions related to the Eclipse platform and IDE are in [`pkgs/applications/editors/eclipse`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse).
+
+Nixpkgs provides a number of packages that will install Eclipse in its various forms. These range from the bare-bones Eclipse Platform to the more fully featured Eclipse SDK or Scala-IDE packages, and multiple versions are often available. It is possible to list the available Eclipse packages by issuing the command:
+
+```ShellSession
+$ nix-env -f '<nixpkgs>' -qaP -A eclipses --description
+```
+
+Once an Eclipse variant is installed, it can be run using the `eclipse` command, as expected. From within Eclipse, it is then possible to install plugins in the usual manner by either manually specifying an Eclipse update site or by installing the Marketplace Client plugin and using it to discover and install other plugins. This installation method provides an Eclipse installation that closely resembles a manually installed Eclipse.
+
+If you prefer to install plugins in a more declarative manner, then Nixpkgs also offers a number of Eclipse plugins that can be installed in an _Eclipse environment_. This type of environment is created using the function `eclipseWithPlugins` found inside the `nixpkgs.eclipses` attribute set. This function takes as argument `{ eclipse, plugins ? [], jvmArgs ? [] }`, where `eclipse` is one of the Eclipse packages described above, `plugins` is a list of plugin derivations, and `jvmArgs` is a list of arguments given to the JVM running Eclipse. For example, say you wish to install the latest Eclipse Platform with the popular Eclipse Color Theme plugin and also allow Eclipse to use more RAM. You could then add:
+
+```nix
+{
+  packageOverrides = pkgs: {
+    myEclipse = with pkgs.eclipses; eclipseWithPlugins {
+      eclipse = eclipse-platform;
+      jvmArgs = [ "-Xmx2048m" ];
+      plugins = [ plugins.color-theme ];
+    };
+  };
+}
+```
+
+to your Nixpkgs configuration (`~/.config/nixpkgs/config.nix`) and install it by running `nix-env -f '<nixpkgs>' -iA myEclipse` and afterward run Eclipse as usual. It is possible to find out which plugins are available for installation using `eclipseWithPlugins` by running:
+
+```ShellSession
+$ nix-env -f '<nixpkgs>' -qaP -A eclipses.plugins --description
+```
+
+If there is a need to install plugins that are not available in Nixpkgs then it may be possible to define these plugins outside Nixpkgs using the `buildEclipseUpdateSite` and `buildEclipsePlugin` functions found in the `nixpkgs.eclipses.plugins` attribute set. Use the `buildEclipseUpdateSite` function to install a plugin distributed as an Eclipse update site. This function takes `{ name, src }` as argument, where `src` indicates the Eclipse update site archive. All Eclipse features and plugins within the downloaded update site will be installed. When an update site archive is not available, then the `buildEclipsePlugin` function can be used to install a plugin that consists of a pair of feature and plugin JARs. This function takes an argument `{ name, srcFeature, srcPlugin }` where `srcFeature` and `srcPlugin` are the feature and plugin JARs, respectively.
+
+Expanding the previous example with two plugins using the above functions, we have:
+
+```nix
+{
+  packageOverrides = pkgs: {
+    myEclipse = with pkgs.eclipses; eclipseWithPlugins {
+      eclipse = eclipse-platform;
+      jvmArgs = [ "-Xmx2048m" ];
+      plugins = [
+        plugins.color-theme
+        (plugins.buildEclipsePlugin {
+          name = "myplugin1-1.0";
+          srcFeature = fetchurl {
+            url = "http://…/features/myplugin1.jar";
+            hash = "sha256-123…";
+          };
+          srcPlugin = fetchurl {
+            url = "http://…/plugins/myplugin1.jar";
+            hash = "sha256-123…";
+          };
+        })
+        (plugins.buildEclipseUpdateSite {
+          name = "myplugin2-1.0";
+          src = fetchurl {
+            stripRoot = false;
+            url = "http://…/myplugin2.zip";
+            hash = "sha256-123…";
+          };
+        })
+      ];
+    };
+  };
+}
+```
diff --git a/nixpkgs/doc/packages/elm.section.md b/nixpkgs/doc/packages/elm.section.md
new file mode 100644
index 000000000000..063dd73d9de4
--- /dev/null
+++ b/nixpkgs/doc/packages/elm.section.md
@@ -0,0 +1,11 @@
+# Elm {#sec-elm}
+
+To start a development environment, run:
+
+```ShellSession
+nix-shell -p elmPackages.elm elmPackages.elm-format
+```
+
+To update the Elm compiler, see `nixpkgs/pkgs/development/compilers/elm/README.md`.
+
+To package Elm applications, [read about elm2nix](https://github.com/hercules-ci/elm2nix#elm2nix).
diff --git a/nixpkgs/doc/packages/emacs.section.md b/nixpkgs/doc/packages/emacs.section.md
new file mode 100644
index 000000000000..2ced251f3e46
--- /dev/null
+++ b/nixpkgs/doc/packages/emacs.section.md
@@ -0,0 +1,118 @@
+# Emacs {#sec-emacs}
+
+## Configuring Emacs {#sec-emacs-config}
+
+The Emacs package comes with some extra helpers to make it easier to configure. `emacs.pkgs.withPackages` allows you to manage packages from ELPA. This means that you will not have to install those packages from within Emacs. For instance, if you wanted to use `company`, `counsel`, `flycheck`, `ivy`, `magit`, `projectile`, and `use-package`, you could use this as a `~/.config/nixpkgs/config.nix` override:
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myEmacs = emacs.pkgs.withPackages (epkgs: (with epkgs.melpaStablePackages; [
+      company
+      counsel
+      flycheck
+      ivy
+      magit
+      projectile
+      use-package
+    ]));
+  };
+}
+```
+
+You can install it like any other packages via `nix-env -iA myEmacs`. However, this will only install those packages. It will not `configure` them for us. To do this, we need to provide a configuration file. Luckily, it is possible to do this from within Nix! By modifying the above example, we can make Emacs load a custom config file. The key is to create a package that provides a `default.el` file in `/share/emacs/site-start/`. Emacs knows to load this file automatically when it starts.
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; rec {
+    myEmacsConfig = writeText "default.el" ''
+      (eval-when-compile
+        (require 'use-package))
+
+      ;; load some packages
+
+      (use-package company
+        :bind ("<C-tab>" . company-complete)
+        :diminish company-mode
+        :commands (company-mode global-company-mode)
+        :defer 1
+        :config
+        (global-company-mode))
+
+      (use-package counsel
+        :commands (counsel-descbinds)
+        :bind (([remap execute-extended-command] . counsel-M-x)
+               ("C-x C-f" . counsel-find-file)
+               ("C-c g" . counsel-git)
+               ("C-c j" . counsel-git-grep)
+               ("C-c k" . counsel-ag)
+               ("C-x l" . counsel-locate)
+               ("M-y" . counsel-yank-pop)))
+
+      (use-package flycheck
+        :defer 2
+        :config (global-flycheck-mode))
+
+      (use-package ivy
+        :defer 1
+        :bind (("C-c C-r" . ivy-resume)
+               ("C-x C-b" . ivy-switch-buffer)
+               :map ivy-minibuffer-map
+               ("C-j" . ivy-call))
+        :diminish ivy-mode
+        :commands ivy-mode
+        :config
+        (ivy-mode 1))
+
+      (use-package magit
+        :defer
+        :if (executable-find "git")
+        :bind (("C-x g" . magit-status)
+               ("C-x G" . magit-dispatch-popup))
+        :init
+        (setq magit-completing-read-function 'ivy-completing-read))
+
+      (use-package projectile
+        :commands projectile-mode
+        :bind-keymap ("C-c p" . projectile-command-map)
+        :defer 5
+        :config
+        (projectile-global-mode))
+    '';
+
+    myEmacs = emacs.pkgs.withPackages (epkgs: (with epkgs.melpaStablePackages; [
+      (runCommand "default.el" {} ''
+         mkdir -p $out/share/emacs/site-lisp
+         cp ${myEmacsConfig} $out/share/emacs/site-lisp/default.el
+       '')
+      company
+      counsel
+      flycheck
+      ivy
+      magit
+      projectile
+      use-package
+    ]));
+  };
+}
+```
+
+This provides a fairly complete Emacs start file. It will be loaded in addition to the user's personal configuration. You can always disable it by passing `-q` to the Emacs command.
+
+Sometimes `emacs.pkgs.withPackages` is not enough, as this package set has some priorities imposed on packages (with the lowest priority assigned to GNU-devel ELPA, and the highest for packages manually defined in `pkgs/applications/editors/emacs/elisp-packages/manual-packages`). But you can't control these priorities when some package is installed as a dependency. You can override it on a per-package basis, providing all the required dependencies manually, but this is tedious and there is always a possibility that an unwanted dependency will sneak in through some other package. To completely override such a package, you can use `overrideScope`.
+
+```nix
+let
+  overrides = self: super: rec {
+    haskell-mode = self.melpaPackages.haskell-mode;
+    # ...
+  };
+in
+((emacsPackagesFor emacs).overrideScope overrides).withPackages
+  (p: with p; [
+    # here both of these packages will use the haskell-mode of our choice
+    ghc-mod
+    dante
+  ])
+```
diff --git a/nixpkgs/doc/packages/etc-files.section.md b/nixpkgs/doc/packages/etc-files.section.md
new file mode 100644
index 000000000000..94a769ed3355
--- /dev/null
+++ b/nixpkgs/doc/packages/etc-files.section.md
@@ -0,0 +1,18 @@
+# /etc files {#etc}
+
+Certain calls in glibc require access to runtime files found in `/etc` such as `/etc/protocols` or `/etc/services` -- [getprotobyname](https://linux.die.net/man/3/getprotobyname) is one such function.
+
+On non-NixOS distributions these files are typically provided by packages (e.g., [netbase](https://packages.debian.org/sid/netbase)) if not already pre-installed in your distribution. This can cause non-reproducibility for code that relies on these files being present.
+
+If [iana-etc](https://hydra.nixos.org/job/nixos/trunk-combined/nixpkgs.iana-etc.x86_64-linux) is part of your `buildInputs`, then it will set the environment variables `NIX_ETC_PROTOCOLS` and `NIX_ETC_SERVICES` to the corresponding files in the package through a setup hook.
+
+
+```bash
+> nix-shell -p iana-etc
+
+[nix-shell:~]$ env | grep NIX_ETC
+NIX_ETC_SERVICES=/nix/store/aj866hr8fad8flnggwdhrldm0g799ccz-iana-etc-20210225/etc/services
+NIX_ETC_PROTOCOLS=/nix/store/aj866hr8fad8flnggwdhrldm0g799ccz-iana-etc-20210225/etc/protocols
+```
+
+The Nixpkgs version of [glibc](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/glibc/default.nix) has been patched to check for the existence of these environment variables. If the environment variables are *not* set, it will attempt to find the files at the default location within `/etc`.
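+
+For example, a derivation that needs `/etc/services` or `/etc/protocols` at build time could pull in `iana-etc` (a minimal sketch; the package being built is hypothetical):
+
+```nix
+{ stdenv, iana-etc }:
+
+stdenv.mkDerivation {
+  pname = "needs-etc-services";   # hypothetical
+  version = "0.1";
+  src = ./.;
+  # iana-etc's setup hook exports NIX_ETC_PROTOCOLS and NIX_ETC_SERVICES,
+  # which the patched glibc consults instead of /etc.
+  buildInputs = [ iana-etc ];
+}
+```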
diff --git a/nixpkgs/doc/packages/firefox.section.md b/nixpkgs/doc/packages/firefox.section.md
new file mode 100644
index 000000000000..46bc0457a3dc
--- /dev/null
+++ b/nixpkgs/doc/packages/firefox.section.md
@@ -0,0 +1,55 @@
+# Firefox {#sec-firefox}
+
+## Build wrapped Firefox with extensions and policies {#build-wrapped-firefox-with-extensions-and-policies}
+
+The `wrapFirefox` function allows you to pass policies, preferences and extensions that are available to Firefox. With the help of `fetchFirefoxAddon`, this allows building a Firefox version that already comes with add-ons pre-installed:
+
+```nix
+{
+  # Nix firefox addons only work with the firefox-esr package.
+  myFirefox = wrapFirefox firefox-esr-unwrapped {
+    nixExtensions = [
+      (fetchFirefoxAddon {
+        name = "ublock"; # Has to be unique!
+        url = "https://addons.mozilla.org/firefox/downloads/file/3679754/ublock_origin-1.31.0-an+fx.xpi";
+        hash = "sha256-2e73AbmYZlZXCP5ptYVcFjQYdjDp4iPoEPEOSCVF5sA=";
+      })
+    ];
+
+    extraPolicies = {
+      CaptivePortal = false;
+      DisableFirefoxStudies = true;
+      DisablePocket = true;
+      DisableTelemetry = true;
+      DisableFirefoxAccounts = true;
+      FirefoxHome = {
+        Pocket = false;
+        Snippets = false;
+      };
+      UserMessaging = {
+        ExtensionRecommendations = false;
+        SkipOnboarding = true;
+      };
+      SecurityDevices = {
+        # Use a proxy module rather than `nixpkgs.config.firefox.smartcardSupport = true`
+        "PKCS#11 Proxy Module" = "${pkgs.p11-kit}/lib/p11-kit-proxy.so";
+      };
+    };
+
+    extraPrefs = ''
+      // Show more ssl cert infos
+      lockPref("security.identityblock.show_extended_validation", true);
+    '';
+  };
+}
+```
+
+If `nixExtensions != null`, then all manually installed add-ons will be uninstalled from your browser profile.
+To view available enterprise policies, visit [enterprise policies](https://github.com/mozilla/policy-templates#enterprisepoliciesenabled)
+or type into the Firefox URL bar: `about:policies#documentation`.
+Nix installed add-ons do not have a valid signature, which is why signature verification is disabled. This does not compromise security because downloaded add-ons are checksummed and manual add-ons can't be installed. Also, make sure that the `name` field of `fetchFirefoxAddon` is unique. If you remove an add-on from the `nixExtensions` array, rebuild and start Firefox: the removed add-on will be completely removed with all of its settings.
+
+## Troubleshooting {#sec-firefox-troubleshooting}
+
+If add-ons are marked as broken or the signature is invalid, make sure you have Firefox ESR installed. Normal Firefox no longer provides the ability to disable signature verification for add-ons, so Nix-installed add-ons get disabled by the normal Firefox binary.
+
+If add-ons do not appear installed despite being defined in your nix configuration file, reset the local add-on state of your Firefox profile by clicking `Help -> More Troubleshooting Information -> Refresh Firefox`. This can happen if you switch from manual add-on mode to nix add-on mode and then back to manual mode and then again to nix add-on mode.
diff --git a/nixpkgs/doc/packages/fish.section.md b/nixpkgs/doc/packages/fish.section.md
new file mode 100644
index 000000000000..85b57acd1090
--- /dev/null
+++ b/nixpkgs/doc/packages/fish.section.md
@@ -0,0 +1,50 @@
+# Fish {#sec-fish}
+
+Fish is a "smart and user-friendly command line shell" with support for plugins.
+
+
+## Vendor Fish scripts {#sec-fish-vendor}
+
+Any package may ship its own Fish completions, configuration snippets, and
+functions. Those should be installed to
+`$out/share/fish/vendor_{completions,conf,functions}.d` respectively.
+
+When the `programs.fish.enable` and
+`programs.fish.vendor.{completions,config,functions}.enable` options from the
+NixOS Fish module are set to true, those paths are symlinked in the current
+system environment and automatically loaded by Fish.
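+
+For example, a package could install a completion script in `postInstall` (a
+sketch; the package and file names are hypothetical):
+
+```nix
+stdenv.mkDerivation {
+  pname = "mytool";   # hypothetical package
+  version = "1.0";
+  src = ./.;
+  postInstall = ''
+    install -Dm644 completions/mytool.fish \
+      $out/share/fish/vendor_completions.d/mytool.fish
+  '';
+}
+```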
+
+
+## Packaging Fish plugins {#sec-fish-plugins-pkg}
+
+While packages providing standalone executables belong to the top level,
+packages which have the sole purpose of extending Fish belong to the
+`fishPlugins` scope and should be registered in
+`pkgs/shells/fish/plugins/default.nix`.
+
+The `buildFishPlugin` utility function can be used to automatically copy Fish
+scripts from `$src/{completions,conf,conf.d,functions}` to the standard vendor
+installation paths. It also sets up the test environment so that the optional
+`checkPhase` is executed in a Fish shell with other already packaged plugins
+and package-local Fish functions specified in `checkPlugins` and
+`checkFunctionDirs` respectively.
+
+See `pkgs/shells/fish/plugins/pure.nix` for an example of Fish plugin package
+using `buildFishPlugin` and running unit tests with the `fishtape` test runner.
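+
+A rough sketch of such a plugin package (all names and the hash are
+placeholders):
+
+```nix
+{ buildFishPlugin, fetchFromGitHub, fishtape }:
+
+buildFishPlugin rec {
+  pname = "my-plugin";   # placeholder
+  version = "1.0";
+  src = fetchFromGitHub {
+    owner = "someone";   # placeholder
+    repo = pname;
+    rev = version;
+    hash = "...";
+  };
+  # run the plugin's fishtape tests in a Fish shell
+  checkPlugins = [ fishtape ];
+  checkPhase = ''
+    fishtape test/*.fish
+  '';
+}
+```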
+
+
+## Fish wrapper {#sec-fish-wrapper}
+
+The `wrapFish` package is a wrapper around Fish which can be used to create
+Fish shells initialized with some plugins as well as completions, configuration
+snippets and functions sourced from the given paths. This provides a convenient
+way to test Fish plugins and scripts without having to alter the environment.
+
+```nix
+wrapFish {
+  pluginPkgs = with fishPlugins; [ pure foreign-env ];
+  completionDirs = [];
+  functionDirs = [];
+  confDirs = [ "/path/to/some/fish/init/dir/" ];
+}
+```
diff --git a/nixpkgs/doc/packages/fuse.section.md b/nixpkgs/doc/packages/fuse.section.md
new file mode 100644
index 000000000000..6deea6b5626e
--- /dev/null
+++ b/nixpkgs/doc/packages/fuse.section.md
@@ -0,0 +1,45 @@
+# FUSE {#sec-fuse}
+
+Some packages rely on
+[FUSE](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) to provide
+support for additional filesystems not supported by the kernel.
+
+In general, FUSE software is primarily developed for Linux, but much of it can
+also run on macOS. Nixpkgs supports FUSE packages on macOS, but it requires
+[macFUSE](https://osxfuse.github.io) to be installed outside of Nix. macFUSE
+currently isn't packaged in Nixpkgs mainly because it includes a kernel
+extension, which isn't supported by Nix outside of NixOS.
+
+If a package fails to run on macOS with an error message similar to the
+following, it's a likely sign that you need to have macFUSE installed.
+
+    dyld: Library not loaded: /usr/local/lib/libfuse.2.dylib
+    Referenced from: /nix/store/w8bi72bssv0bnxhwfw3xr1mvn7myf37x-sshfs-fuse-2.10/bin/sshfs
+    Reason: image not found
+    [1]    92299 abort      /nix/store/w8bi72bssv0bnxhwfw3xr1mvn7myf37x-sshfs-fuse-2.10/bin/sshfs
+
+Package maintainers may often encounter the following error when building FUSE
+packages on macOS:
+
+    checking for fuse.h... no
+    configure: error: No fuse.h found.
+
+This happens on autoconf based projects that use `AC_CHECK_HEADERS` or
+`AC_CHECK_LIBS` to detect libfuse, and will occur even when the `fuse` package
+is included in `buildInputs`. It happens because libfuse headers throw an error
+on macOS if the `FUSE_USE_VERSION` macro is undefined. Many projects do define
+`FUSE_USE_VERSION`, but only inside C source files. This results in the above
+error at configure time because the configure script would attempt to compile
+sample FUSE programs without defining `FUSE_USE_VERSION`.
+
+There are two possible solutions for this problem in Nixpkgs:
+
+1. Pass `FUSE_USE_VERSION` to the configure script by adding
+   `CFLAGS=-DFUSE_USE_VERSION=25` in `configureFlags`, as shown in the sketch
+   after this list. The actual value would have to match the definition used
+   in the upstream source code.
+2. Remove `AC_CHECK_HEADERS` / `AC_CHECK_LIBS` for libfuse.
+
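+A sketch of the first option in a package expression (the version value is
+only an example and must match what the upstream sources expect):
+
+```nix
+stdenv.mkDerivation {
+  # ...
+  buildInputs = [ fuse ];
+  configureFlags = [ "CFLAGS=-DFUSE_USE_VERSION=25" ];
+}
+```
+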
+However, a better solution might be to fix the build script upstream to use
+`PKG_CHECK_MODULES` instead. This approach wouldn't suffer from the problem that
+`AC_CHECK_HEADERS`/`AC_CHECK_LIBS` has at the price of introducing a dependency
+on pkg-config.
diff --git a/nixpkgs/doc/packages/ibus.section.md b/nixpkgs/doc/packages/ibus.section.md
new file mode 100644
index 000000000000..817e55d56f1f
--- /dev/null
+++ b/nixpkgs/doc/packages/ibus.section.md
@@ -0,0 +1,40 @@
+# ibus-engines.typing-booster {#sec-ibus-typing-booster}
+
+This package is an ibus-based completion method to speed up typing.
+
+## Activating the engine {#sec-ibus-typing-booster-activate}
+
+IBus needs to be configured accordingly to activate `typing-booster`. The configuration depends on the desktop manager in use. For detailed instructions, please refer to the [upstream docs](https://mike-fabian.github.io/ibus-typing-booster/).
+
+On NixOS, you need to explicitly enable `ibus` with given engines before customizing your desktop to use `typing-booster`. This can be achieved using the `ibus` module:
+
+```nix
+{ pkgs, ... }: {
+  i18n.inputMethod = {
+    enabled = "ibus";
+    ibus.engines = with pkgs.ibus-engines; [ typing-booster ];
+  };
+}
+```
+
+## Using custom hunspell dictionaries {#sec-ibus-typing-booster-customize-hunspell}
+
+The IBus engine is based on `hunspell` to support completion in many languages. By default, the dictionaries `de-de`, `en-us`, `fr-moderne`, `es-es`, `it-it`, `sv-se` and `sv-fi` are in use. To add another dictionary, the package can be overridden like this:
+
+```nix
+ibus-engines.typing-booster.override { langs = [ "de-at" "en-gb" ]; }
+```
+
+_Note: each language passed to `langs` must be an attribute name in `pkgs.hunspellDicts`._
+
+## Built-in emoji picker {#sec-ibus-typing-booster-emoji-picker}
+
+The `ibus-engines.typing-booster` package contains a program named `emoji-picker`. To display all emojis correctly, a special font such as `noto-fonts-color-emoji` is needed.
+
+On NixOS, it can be installed using the following expression:
+
+```nix
+{ pkgs, ... }: {
+  fonts.packages = with pkgs; [ noto-fonts-color-emoji ];
+}
+```
diff --git a/nixpkgs/doc/packages/index.md b/nixpkgs/doc/packages/index.md
new file mode 100644
index 000000000000..1f45018ffc4a
--- /dev/null
+++ b/nixpkgs/doc/packages/index.md
@@ -0,0 +1,28 @@
+# Packages {#chap-packages}
+
+This chapter contains information about how to use and maintain the Nix expressions for a number of specific packages, such as the Linux kernel or X.org.
+
+```{=include=} sections
+citrix.section.md
+darwin-builder.section.md
+dlib.section.md
+eclipse.section.md
+elm.section.md
+emacs.section.md
+firefox.section.md
+fish.section.md
+fuse.section.md
+ibus.section.md
+kakoune.section.md
+linux.section.md
+locales.section.md
+etc-files.section.md
+nginx.section.md
+opengl.section.md
+shell-helpers.section.md
+steam.section.md
+cataclysm-dda.section.md
+urxvt.section.md
+weechat.section.md
+xorg.section.md
+```
diff --git a/nixpkgs/doc/packages/kakoune.section.md b/nixpkgs/doc/packages/kakoune.section.md
new file mode 100644
index 000000000000..8e054777a757
--- /dev/null
+++ b/nixpkgs/doc/packages/kakoune.section.md
@@ -0,0 +1,9 @@
+# Kakoune {#sec-kakoune}
+
+Kakoune can be built to autoload plugins:
+
+```nix
+(kakoune.override {
+  plugins = with pkgs.kakounePlugins; [ parinfer-rust ];
+})
+```
diff --git a/nixpkgs/doc/packages/linux.section.md b/nixpkgs/doc/packages/linux.section.md
new file mode 100644
index 000000000000..4c3b2a3b132a
--- /dev/null
+++ b/nixpkgs/doc/packages/linux.section.md
@@ -0,0 +1,113 @@
+# Linux kernel {#sec-linux-kernel}
+
+The Nix expressions to build the Linux kernel are in [`pkgs/os-specific/linux/kernel`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/os-specific/linux/kernel).
+
+The function [`pkgs.buildLinux`](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/generic.nix) builds a kernel with [common configuration values](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/common-config.nix).
+This is the preferred option unless you have a very specific use case.
+Most kernels packaged in Nixpkgs are built that way, and it will also generate kernels suitable for NixOS.
+[`pkgs.linuxManualConfig`](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/manual-config.nix) requires a complete configuration to be passed.
+It has fewer additional features than `pkgs.buildLinux`, which provides common configuration values and exposes the `features` attribute, as explained below.
+
+Both functions have an argument `kernelPatches` which should be a list of `{name, patch, extraConfig}` attribute sets, where `name` is the name of the patch (which is included in the kernel’s `meta.description` attribute), `patch` is the patch itself (possibly compressed), and `extraConfig` (optional) is a string specifying extra options to be concatenated to the kernel configuration file (`.config`).
+
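+For instance, a single patch with an accompanying configuration fragment could be passed like this (a sketch; the patch file and option are only illustrative):
+
+```nix
+pkgs.linux_latest.override {
+  kernelPatches = [
+    {
+      name = "example-fix";            # illustrative
+      patch = ./example-fix.patch;     # illustrative
+      # appended to the generated .config (optional)
+      extraConfig = ''
+        EXAMPLE_OPTION y
+      '';
+    }
+  ];
+}
+```
+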
+The kernel derivation created with `pkgs.buildLinux` exports an attribute `features` specifying whether optional functionality is or isn’t enabled. This is used in NixOS to implement kernel-specific behaviour.
+
+If you are using a kernel packaged in Nixpkgs, you can customize it by overriding its arguments. For details on how each argument affects the generated kernel, refer to [the `pkgs.buildLinux` source code](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/generic.nix).
+
+:::{.example #ex-overriding-kernel-derivation}
+
+# Overriding the kernel derivation
+
+Assuming you are using the kernel from `pkgs.linux_latest`:
+
+```nix
+pkgs.linux_latest.override {
+  ignoreConfigErrors = true;
+  autoModules = false;
+  kernelPreferBuiltin = true;
+  extraStructuredConfig = with lib.kernel; {
+    DEBUG_KERNEL = yes;
+    FRAME_POINTER = yes;
+    KGDB = yes;
+    KGDB_SERIAL_CONSOLE = yes;
+    DEBUG_INFO = yes;
+  };
+}
+```
+
+:::
+
+## Manual kernel configuration {#sec-manual-kernel-configuration}
+
+Sometimes it may not be desirable to use kernels built with `pkgs.buildLinux`, especially if most of the common configuration has to be altered or disabled to achieve a kernel as expected by the target use case.
+An example of this is building a kernel for use in a VM or micro VM. You can use `pkgs.linuxManualConfig` in these cases. It requires the `src`, `version`, and `configfile` attributes to be specified.
+
+:::{.example #ex-using-linux-manual-config}
+
+# Using `pkgs.linuxManualConfig` with a specific source, version, and config file
+
+```nix
+{ pkgs, ... }: rec {
+  version = "6.1.55";
+  src = pkgs.fetchurl {
+    url = "https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-${version}.tar.xz";
+    hash = "sha256:1h0mzx52q9pvdv7rhnvb8g68i7bnlc9rf8gy9qn4alsxq4g28zm8";
+  };
+  configfile = ./path_to_config_file;
+  linux = pkgs.linuxManualConfig {
+    inherit version src configfile;
+    allowImportFromDerivation = true;
+  };
+}
+```
+
+If necessary, the version string can be slightly modified to explicitly mark it as a custom version. If you do so, ensure the `modDirVersion` attribute matches the source's version, otherwise the build will fail.
+
+```nix
+{ pkgs, ... }: rec {
+  version = "6.1.55-custom";
+  modDirVersion = "6.1.55";
+  src = pkgs.fetchurl {
+    url = "https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-${modDirVersion}.tar.xz";
+    hash = "sha256:1h0mzx52q9pvdv7rhnvb8g68i7bnlc9rf8gy9qn4alsxq4g28zm8";
+  };
+  configfile = ./path_to_config_file;
+  linux = pkgs.linuxManualConfig {
+    inherit version modDirVersion src configfile;
+    allowImportFromDerivation = true;
+  };
+}
+```
+
+:::
+
+Additional attributes can be used with `linuxManualConfig` for further customisation. You're encouraged to read [the `pkgs.linuxManualConfig` source code](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/manual-config.nix) to understand how to use them.
+
+To edit the `.config` file for Linux X.Y from within Nix, proceed as follows:
+
+```ShellSession
+$ nix-shell '<nixpkgs>' -A linuxKernel.kernels.linux_X_Y.configEnv
+$ unpackPhase
+$ cd linux-*
+$ make nconfig
+```
+
+## Developing kernel modules {#sec-linux-kernel-developing-modules}
+
+When developing kernel modules it's often convenient to run the edit-compile-run loop as quickly as possible.
+See the snippet below as an example.
+
+:::{.example #ex-edit-compile-run-kernel-modules}
+
+# Edit-compile-run loop when developing `mellanox` drivers
+
+```ShellSession
+$ nix-build '<nixpkgs>' -A linuxPackages.kernel.dev
+$ nix-shell '<nixpkgs>' -A linuxPackages.kernel
+$ unpackPhase
+$ cd linux-*
+$ make -C $dev/lib/modules/*/build M=$(pwd)/drivers/net/ethernet/mellanox modules
+# insmod ./drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko
+```
+
+:::
diff --git a/nixpkgs/doc/packages/locales.section.md b/nixpkgs/doc/packages/locales.section.md
new file mode 100644
index 000000000000..3a983f13a396
--- /dev/null
+++ b/nixpkgs/doc/packages/locales.section.md
@@ -0,0 +1,5 @@
+# Locales {#locales}
+
+To allow simultaneous use of packages linked against different versions of `glibc` with different locale archive formats, Nixpkgs patches `glibc` to rely on the `LOCALE_ARCHIVE` environment variable.
+
+On non-NixOS distributions, this variable is obviously not set. This can cause regressions in language support or even crashes in some Nixpkgs-provided programs. The simplest way to mitigate this problem is exporting the `LOCALE_ARCHIVE` variable pointing to `${glibcLocales}/lib/locale/locale-archive`. The drawback (and the reason this is not the default) is the relatively large (a hundred MiB) size of the full set of locales. It is possible to build a custom set of locales by overriding parameters `allLocales` and `locales` of the package.
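+
+For example, on a non-NixOS system one way to set the variable for the current shell is (a sketch; adjust to your setup):
+
+```ShellSession
+$ export LOCALE_ARCHIVE="$(nix-build --no-out-link '<nixpkgs>' -A glibcLocales)/lib/locale/locale-archive"
+```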
diff --git a/nixpkgs/doc/packages/nginx.section.md b/nixpkgs/doc/packages/nginx.section.md
new file mode 100644
index 000000000000..41241304ceff
--- /dev/null
+++ b/nixpkgs/doc/packages/nginx.section.md
@@ -0,0 +1,11 @@
+# Nginx {#sec-nginx}
+
+[Nginx](https://nginx.org) is a reverse proxy and lightweight webserver.
+
+## ETags on static files served from the Nix store {#sec-nginx-etag}
+
+HTTP has a couple of different mechanisms for caching to prevent clients from having to download the same content repeatedly if a resource has not changed since the last time it was requested. When nginx is used as a server for static files, it implements the caching mechanism based on the [`Last-Modified`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified) response header automatically; unfortunately, it works by using filesystem timestamps to determine the value of the `Last-Modified` header. This doesn't give the desired behavior when the file is in the Nix store because all file timestamps are set to 0 (for reasons related to build reproducibility).
+
+Fortunately, HTTP supports an alternative (and more effective) caching mechanism: the [`ETag`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag) response header. The value of the `ETag` header specifies some identifier for the particular content that the server is sending (e.g., a hash). When a client makes a second request for the same resource, it sends that value back in an `If-None-Match` header. If the ETag value is unchanged, then the server does not need to resend the content.
+
+As of NixOS 19.09, the nginx package in Nixpkgs is patched such that when nginx serves a file out of `/nix/store`, the hash in the store path is used as the `ETag` header in the HTTP response, thus providing proper caching functionality. With NixOS 24.05 and later, the `ETag` additionally includes the response content length, to ensure files served with static compression do not share `ETag`s with their uncompressed version. This `ETag` functionality is enabled automatically; you do not need to modify any configuration to get this behavior.
diff --git a/nixpkgs/doc/packages/opengl.section.md b/nixpkgs/doc/packages/opengl.section.md
new file mode 100644
index 000000000000..f4d282267a07
--- /dev/null
+++ b/nixpkgs/doc/packages/opengl.section.md
@@ -0,0 +1,15 @@
+# OpenGL {#sec-opengl}
+
+OpenGL support varies depending on which hardware is used and which drivers are available and loaded.
+
+Broadly, we support both GL vendors: Mesa and NVIDIA.
+
+## NixOS Desktop {#nixos-desktop}
+
+The NixOS desktop or other non-headless configurations are the primary target for OpenGL libraries and applications. The current solution for discovering which drivers are available is based on [libglvnd](https://gitlab.freedesktop.org/glvnd/libglvnd). `libglvnd` performs "vendor-neutral dispatch", trying a variety of techniques to find the system's GL implementation. In practice, this will be either via standard GLX for X11 users or EGL for Wayland users, and supporting either NVIDIA or Mesa extensions.
+
+## Nix on GNU/Linux {#nix-on-gnulinux}
+
+If you are using a non-NixOS GNU/Linux/X11 desktop with free software video drivers, consider launching OpenGL-dependent programs from Nixpkgs with Nixpkgs versions of `libglvnd` and `mesa.drivers` in `LD_LIBRARY_PATH`. For Mesa drivers, the Linux kernel version doesn't have to match nixpkgs.
+
+For proprietary video drivers, you might have luck with also adding the corresponding video driver package.
diff --git a/nixpkgs/doc/packages/shell-helpers.section.md b/nixpkgs/doc/packages/shell-helpers.section.md
new file mode 100644
index 000000000000..e7c2b0abebfc
--- /dev/null
+++ b/nixpkgs/doc/packages/shell-helpers.section.md
@@ -0,0 +1,12 @@
+# Interactive shell helpers {#sec-shell-helpers}
+
+Some packages provide shell integration to be more useful. But unlike other systems, Nix doesn't have a standard `share` directory location. This is why a number of `PACKAGE-share` scripts are shipped that print the location of the corresponding shared folder. The current list of such packages is as follows:
+
+- `fzf` : `fzf-share`
+
+E.g. `fzf` can then be used in the `.bashrc` like this:
+
+```bash
+source "$(fzf-share)/completion.bash"
+source "$(fzf-share)/key-bindings.bash"
+```
diff --git a/nixpkgs/doc/packages/steam.section.md b/nixpkgs/doc/packages/steam.section.md
new file mode 100644
index 000000000000..c9a09962f62d
--- /dev/null
+++ b/nixpkgs/doc/packages/steam.section.md
@@ -0,0 +1,63 @@
+# Steam {#sec-steam}
+
+## Steam in Nix {#sec-steam-nix}
+
+Steam is distributed as a `.deb` file, for now only as an i686 package (the amd64 package only has documentation). When unpacked, it has a script called `steam` that in Ubuntu (their target distro) would go to `/usr/bin`. When run for the first time, this script copies some files to the user's home, which include another script that is ultimately responsible for launching the Steam binary, which is also in `$HOME`.
+
+Nix problems and constraints:
+
+- We don't have `/bin/bash` and many scripts point there. Same thing for `/usr/bin/python`.
+- We don't have the dynamic loader in `/lib`.
+- The `steam.sh` script in `$HOME` cannot be patched, as it is checked and rewritten by steam.
+- The steam binary cannot be patched, it's also checked.
+
+The current approach to deploy Steam in NixOS is composing a FHS-compatible chroot environment, as documented [here](https://sandervanderburg.blogspot.com/2013/09/composing-fhs-compatible-chroot.html). This allows us to have binaries in the expected paths without disrupting the system, and to avoid patching them to work in a non FHS environment.
+
+## How to play {#sec-steam-play}
+
+Use `programs.steam.enable = true;` if you want to add Steam to `systemPackages` and also enable a few workarounds, as well as support for the Steam Controller and other Steam-supported controllers such as the DualShock 4 or the Nintendo Switch Pro Controller.
+
+## Troubleshooting {#sec-steam-troub}
+
+- **Steam fails to start. What do I do?**
+
+  Try to run
+
+  ```ShellSession
+  strace steam
+  ```
+
+  to see what is causing steam to fail.
+
+- **Using the FOSS Radeon or nouveau (nvidia) drivers**
+
+  - The `newStdcpp` parameter was removed in NixOS 17.09 and should not be needed anymore.
+  - Steam ships statically linked with a version of `libcrypto` that conflicts with the one dynamically loaded by radeonsi_dri.so. If you get the error:
+
+    ```
+    steam.sh: line 713: 7842 Segmentation fault (core dumped)
+    ```
+
+    have a look at [this pull request](https://github.com/NixOS/nixpkgs/pull/20269).
+
+- **Java**
+
+  1. There is no java in steam chrootenv by default. If you get a message like:
+
+    ```
+    /home/foo/.local/share/Steam/SteamApps/common/towns/towns.sh: line 1: java: command not found
+    ```
+
+    you need to add:
+
+    ```nix
+    steam.override { withJava = true; }
+    ```
+
+## steam-run {#sec-steam-run}
+
+The FHS-compatible chroot used for Steam can also be used to run other Linux games that expect an FHS environment. To use it, install the `steam-run` package and run the game with:
+
+```
+steam-run ./foo
+```
diff --git a/nixpkgs/doc/packages/urxvt.section.md b/nixpkgs/doc/packages/urxvt.section.md
new file mode 100644
index 000000000000..1d40c92ed73f
--- /dev/null
+++ b/nixpkgs/doc/packages/urxvt.section.md
@@ -0,0 +1,73 @@
+# Urxvt {#sec-urxvt}
+
+Urxvt, also known as rxvt-unicode, is a highly customizable terminal emulator.
+
+## Configuring urxvt {#sec-urxvt-conf}
+
+In `nixpkgs`, urxvt is provided by the package `rxvt-unicode`. It can be configured to include your choice of plugins, reducing its closure size from the default configuration which includes all available plugins. To make use of this functionality, use an overlay or directly install an expression that overrides its configuration, such as:
+
+```nix
+rxvt-unicode.override {
+  configure = { availablePlugins, ... }: {
+    plugins = with availablePlugins; [ perls resize-font vtwheel ];
+  };
+}
+```
+
+If the `configure` function returns an attrset without the `plugins` attribute, `availablePlugins` will be used automatically.
+
+In order to add plugins but also keep all default plugins installed, it is possible to use the following method:
+
+```nix
+rxvt-unicode.override {
+  configure = { availablePlugins, ... }: {
+    plugins = (builtins.attrValues availablePlugins) ++ [ custom-plugin ];
+  };
+}
+```
+
+To get a list of all the plugins available, open the Nix REPL and run
+
+```ShellSession
+$ nix repl
+:l <nixpkgs>
+map (p: p.name) pkgs.rxvt-unicode.plugins
+```
+
+Alternatively, if your shell is bash or zsh and has completion enabled, type `nixpkgs.rxvt-unicode.plugins.<tab>`.
+
+In addition to `plugins` the options `extraDeps` and `perlDeps` can be used to install extra packages. `extraDeps` can be used, for example, to provide `xsel` (a clipboard manager) to the clipboard plugin, without installing it globally:
+
+```nix
+rxvt-unicode.override {
+  configure = { availablePlugins, ... }: {
+    extraDeps = [ xsel ];
+  };
+}
+```
+
+`perlDeps` is a handy way to provide Perl packages to your custom plugins (in `$HOME/.urxvt/ext`). For example, if you need `AnyEvent` you can do:
+
+```nix
+rxvt-unicode.override {
+  configure = { availablePlugins, ... }: {
+    perlDeps = with perlPackages; [ AnyEvent ];
+  };
+}
+```
+
+## Packaging urxvt plugins {#sec-urxvt-pkg}
+
+Urxvt plugins reside in `pkgs/applications/misc/rxvt-unicode-plugins`. To add a new plugin, create an expression in a subdirectory and add the package to the set in `pkgs/applications/misc/rxvt-unicode-plugins/default.nix`.
+
+A plugin can be any kind of derivation; the only requirement is that it installs its Perl scripts to `$out/lib/urxvt/perl`. Look at existing plugins for examples.
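+
+As a rough sketch, a minimal plugin derivation could look like the following (the plugin name, URL, and script name are hypothetical):
+
+```nix
+{ stdenv, fetchurl }:
+
+stdenv.mkDerivation {
+  pname = "urxvt-fancy";
+  version = "1.0";
+  src = fetchurl {
+    url = "https://example.org/urxvt-fancy-1.0.tar.gz";
+    hash = "...";
+  };
+  installPhase = ''
+    runHook preInstall
+    # the only hard requirement: the Perl script ends up in $out/lib/urxvt/perl
+    install -Dm644 fancy $out/lib/urxvt/perl/fancy
+    runHook postInstall
+  '';
+}
+```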
+
+If the plugin is itself a Perl package that needs to be imported from other plugins or scripts, add the following passthrough:
+
+```nix
+{
+  passthru.perlPackages = [ "self" ];
+}
+```
+
+This will make the urxvt wrapper pick up the dependency and set up the Perl path accordingly.
diff --git a/nixpkgs/doc/packages/weechat.section.md b/nixpkgs/doc/packages/weechat.section.md
new file mode 100644
index 000000000000..295397f476b0
--- /dev/null
+++ b/nixpkgs/doc/packages/weechat.section.md
@@ -0,0 +1,85 @@
+# WeeChat {#sec-weechat}
+
+WeeChat can be configured to include your choice of plugins, reducing its closure size from the default configuration which includes all available plugins. To make use of this functionality, install an expression that overrides its configuration, such as:
+
+```nix
+weechat.override {
+  configure = { availablePlugins, ... }: {
+    plugins = with availablePlugins; [ python perl ];
+  };
+}
+```
+
+If the `configure` function returns an attrset without the `plugins` attribute, `availablePlugins` will be used automatically.
+
+The plugins currently available are `python`, `perl`, `ruby`, `guile`, `tcl` and `lua`.
+
+The Python and Perl plugins allow the addition of extra libraries. For instance, the `inotify.py` script in `weechat-scripts` requires D-Bus or libnotify, and the `fish.py` script requires `pycrypto`. To use these scripts, use the plugin's `withPackages` attribute:
+
+```nix
+weechat.override {
+  configure = { availablePlugins, ... }: {
+    plugins = with availablePlugins; [
+      (python.withPackages (ps: with ps; [ pycrypto python-dbus ]))
+    ];
+  };
+}
+```
+
+In order to also keep all default plugins installed, it is possible to use the following method:
+
+```nix
+weechat.override {
+  configure = { availablePlugins, ... }: {
+    plugins = builtins.attrValues (availablePlugins // {
+      python = availablePlugins.python.withPackages (ps: with ps; [ pycrypto python-dbus ]);
+    });
+  };
+}
+```
+
+WeeChat allows setting defaults on startup using the `--run-command` option. The `configure` function can be used to pass commands to the program:
+
+```nix
+weechat.override {
+  configure = { availablePlugins, ... }: {
+    init = ''
+      /set foo bar
+      /server add libera irc.libera.chat
+    '';
+  };
+}
+```
+
+Further values can be added to the list of commands when running `weechat --run-command "your-commands"`.
+
+Additionally, it's possible to specify scripts to be loaded when starting `weechat`. These will be loaded before the commands from `init`:
+
+```nix
+weechat.override {
+  configure = { availablePlugins, ... }: {
+    scripts = with pkgs.weechatScripts; [
+      weechat-xmpp weechat-matrix-bridge wee-slack
+    ];
+    init = ''
+      /set plugins.var.python.jabber.key "val"
+    '';
+  };
+}
+```
+
+In `nixpkgs` there's a subpackage which contains derivations for WeeChat scripts. Such derivations are expected to set a `passthru.scripts` attribute, which contains a list of all scripts inside the store path. Furthermore, all scripts have to live in `$out/share`. An example derivation looks like this:
+
+```nix
+{ stdenv, fetchurl }:
+
+stdenv.mkDerivation {
+  name = "exemplary-weechat-script";
+  src = fetchurl {
+    url = "https://scripts.tld/your-scripts.tar.gz";
+    hash = "...";
+  };
+  passthru.scripts = [ "foo.py" "bar.lua" ];
+  installPhase = ''
+    mkdir -p $out/share
+    cp foo.py $out/share
+    cp bar.lua $out/share
+  '';
+}
+```
diff --git a/nixpkgs/doc/packages/xorg.section.md b/nixpkgs/doc/packages/xorg.section.md
new file mode 100644
index 000000000000..ae885f923467
--- /dev/null
+++ b/nixpkgs/doc/packages/xorg.section.md
@@ -0,0 +1,34 @@
+# X.org {#sec-xorg}
+
+The Nix expressions for the X.org packages reside in `pkgs/servers/x11/xorg/default.nix`. This file is automatically generated from lists of tarballs in an X.org release. As such it should not be modified directly; rather, you should modify the lists, the generator script or the file `pkgs/servers/x11/xorg/overrides.nix`, in which you can override or add to the derivations produced by the generator.
+
+## Katamari Tarballs {#katamari-tarballs}
+
+X.org upstream releases used to include [katamari](https://en.wiktionary.org/wiki/%E3%81%8B%E3%81%9F%E3%81%BE%E3%82%8A) releases, which included a holistic recommended version for each tarball, up until 7.7. To create a list of tarballs in a katamari release:
+
+```ShellSession
+export release="X11R7.7"
+export url="mirror://xorg/$release/src/everything/"
+cat $(PRINT_PATH=1 nix-prefetch-url $url | tail -n 1) \
+  | perl -e 'while (<>) { if (/(href|HREF)="([^"]*.bz2)"/) { print "$ENV{'url'}$2\n"; }; }' \
+  | sort > "tarballs-$release.list"
+```
+
+## Individual Tarballs {#individual-tarballs}
+
+The upstream release process for [X11R7.8](https://x.org/wiki/Releases/7.8/) does not include a planned katamari. Instead, each component of X.org is released as its own tarball. We maintain `pkgs/servers/x11/xorg/tarballs.list` as a list of tarballs for each individual package. This list includes X.org core libraries and protocol descriptions, extra newer X11 interface libraries, like `xorg.libxcb`, and classic utilities which are largely unused but still available if needed, like `xorg.imake`.
+
+## Generating Nix Expressions {#generating-nix-expressions}
+
+The generator is invoked as follows:
+
+```ShellSession
+cd pkgs/servers/x11/xorg
+<tarballs.list perl ./generate-expr-from-tarballs.pl
+```
+
+For each of the tarballs in the `.list` files, the script downloads it, unpacks it, and searches its `configure.ac` and `*.pc.in` files for dependencies. This information is used to generate `default.nix`. The generator caches downloaded tarballs between runs. Pay close attention to the `NOT FOUND: $NAME` messages at the end of the run, since they may indicate missing dependencies. (Some might be optional dependencies, however.)
+
+## Overriding the Generator {#overriding-the-generator}
+
+If the expression for a package requires derivation attributes that the generator cannot figure out automatically (say, `patches` or a `postInstall` hook), you should modify `pkgs/servers/x11/xorg/overrides.nix`.
diff --git a/nixpkgs/doc/preface.chapter.md b/nixpkgs/doc/preface.chapter.md
new file mode 100644
index 000000000000..e6a0905c5a95
--- /dev/null
+++ b/nixpkgs/doc/preface.chapter.md
@@ -0,0 +1,50 @@
+# Preface {#preface}
+
+The Nix Packages collection (Nixpkgs) is a set of thousands of packages for the
+[Nix package manager](https://nixos.org/nix/), released under a
+[permissive MIT license](https://github.com/NixOS/nixpkgs/blob/master/COPYING).
+Packages are available for several platforms, and can be used with the Nix
+package manager on most GNU/Linux distributions as well as [NixOS](https://nixos.org/nixos).
+
+This document is the user [_reference_](https://nix.dev/contributing/documentation/diataxis#reference) manual for Nixpkgs.
+It describes the entire public interface of Nixpkgs in a concise and orderly manner, including all relevant behaviors, with examples and cross-references.
+
+To discover other kinds of documentation:
+- [nix.dev](https://nix.dev/): Tutorials and guides for getting things done with Nix
+- [NixOS **Option Search**](https://search.nixos.org/options) and reference documentation
+- [Nixpkgs **Package Search**](https://search.nixos.org/packages)
+- [**NixOS** manual](https://nixos.org/manual/nixos/stable/): Reference documentation for the NixOS Linux distribution
+- [`CONTRIBUTING.md`](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md): Contributing to Nixpkgs, including this manual
+
+## Overview of Nixpkgs {#overview-of-nixpkgs}
+
+Nix expressions describe how to build packages from source and are collected in
+the [nixpkgs repository](https://github.com/NixOS/nixpkgs). Also included in the
+collection are Nix expressions for
+[NixOS modules](https://nixos.org/nixos/manual/index.html#sec-writing-modules).
+With these expressions the Nix package manager can build binary packages.
+
+Packages, including the Nix packages collection, are distributed through
+[channels](https://nixos.org/nix/manual/#sec-channels). The collection is
+distributed for users of Nix on non-NixOS distributions through the channel
+`nixpkgs-unstable`. Users of NixOS generally use one of the `nixos-*` channels,
+e.g. `nixos-22.11`, which includes all packages and modules for the stable NixOS
+22.11. Stable NixOS releases are generally only given
+security updates. More up to date packages and modules are available via the
+`nixos-unstable` channel.
+
+Both `nixos-unstable` and `nixpkgs-unstable` follow the `master` branch of the
+nixpkgs repository, although both generally lag behind the `master` branch by
+[a couple of days](https://status.nixos.org/). Updates to a channel are
+distributed as soon as all tests for that channel pass, e.g.
+[this table](https://hydra.nixos.org/job/nixpkgs/trunk/unstable#tabs-constituents)
+shows the status of tests for the `nixpkgs-unstable` channel.
+
+The tests are conducted by a cluster called [Hydra](https://nixos.org/hydra/),
+which also builds binary packages from the Nix expressions in Nixpkgs for
+`x86_64-linux`, `i686-linux` and `x86_64-darwin`.
+The binaries are made available via a [binary cache](https://cache.nixos.org).
+
+The current Nix expressions of the channels are available in the
+[nixpkgs repository](https://github.com/NixOS/nixpkgs) in branches
+that correspond to the channel names (e.g. `nixos-22.11-small`).
diff --git a/nixpkgs/doc/shell.nix b/nixpkgs/doc/shell.nix
new file mode 100644
index 000000000000..d71e3f3a709a
--- /dev/null
+++ b/nixpkgs/doc/shell.nix
@@ -0,0 +1,20 @@
+let
+  pkgs = import ../. {
+    config = {};
+    overlays = [];
+  };
+
+  common = import ./common.nix;
+  inherit (common) outputPath indexPath;
+
+  web-devmode = import ../pkgs/tools/nix/web-devmode.nix {
+    inherit pkgs;
+    buildArgs = "./.";
+    open = "/${outputPath}/${indexPath}";
+  };
+in
+  pkgs.mkShell {
+    packages = [
+      web-devmode
+    ];
+  }
diff --git a/nixpkgs/doc/stdenv.md b/nixpkgs/doc/stdenv.md
new file mode 100644
index 000000000000..1ef81f84b514
--- /dev/null
+++ b/nixpkgs/doc/stdenv.md
@@ -0,0 +1,9 @@
+# Standard environment {#part-stdenv}
+
+```{=include=} chapters
+stdenv/stdenv.chapter.md
+stdenv/meta.chapter.md
+stdenv/multiple-output.chapter.md
+stdenv/cross-compilation.chapter.md
+stdenv/platform-notes.chapter.md
+```
diff --git a/nixpkgs/doc/stdenv/cross-compilation.chapter.md b/nixpkgs/doc/stdenv/cross-compilation.chapter.md
new file mode 100644
index 000000000000..76c931ba047a
--- /dev/null
+++ b/nixpkgs/doc/stdenv/cross-compilation.chapter.md
@@ -0,0 +1,264 @@
+# Cross-compilation {#chap-cross}
+
+## Introduction {#sec-cross-intro}
+
+"Cross-compilation" means compiling a program on one machine for another type of machine. For example, a typical use of cross-compilation is to compile programs for embedded devices. These devices often don't have the computing power and memory to compile their own programs. One might think that cross-compilation is a fairly niche concern. However, there are significant advantages to rigorously distinguishing between build-time and run-time environments! Significant, because the benefits apply even when one is developing and deploying on the same machine. Nixpkgs is increasingly adopting the opinion that packages should be written with cross-compilation in mind, and Nixpkgs should evaluate in a similar way (by minimizing cross-compilation-specific special cases) whether or not one is cross-compiling.
+
+This chapter will be organized in three parts. First, it will describe the basics of how to package software in a way that supports cross-compilation. Second, it will describe how to use Nixpkgs when cross-compiling. Third, it will describe the internal infrastructure supporting cross-compilation.
+
+## Packaging in a cross-friendly manner {#sec-cross-packaging}
+
+### Platform parameters {#ssec-cross-platform-parameters}
+
+Nixpkgs follows the [conventions of GNU autoconf](https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html). We distinguish between 3 types of platforms when building a derivation: _build_, _host_, and _target_. In summary, _build_ is the platform on which a package is being built, and _host_ is the platform on which it will run. The third attribute, _target_, is relevant only for certain specific compilers and build tools.
+
+In Nixpkgs, these three platforms are defined as attribute sets under the names `buildPlatform`, `hostPlatform`, and `targetPlatform`. They are always defined as attributes in the standard environment. That means one can access them like:
+
+```nix
+{ stdenv, fooDep, barDep, ... }: {
+  # ...stdenv.buildPlatform...
+}
+```
+
+`buildPlatform`
+
+: The "build platform" is the platform on which a package is built. Once someone has a built package, or pre-built binary package, the build platform should not matter and can be ignored.
+
+`hostPlatform`
+
+: The "host platform" is the platform on which a package will be run. This is the simplest platform to understand, but also the one with the worst name.
+
+`targetPlatform`
+
+: The "target platform" attribute is, unlike the other two attributes, not actually fundamental to the process of building software. Instead, it is only relevant for compatibility with building certain specific compilers and build tools. It can be safely ignored for all other packages.
+
+: The build process of certain compilers is written in such a way that the compiler resulting from a single build can itself only produce binaries for a single platform. The task of specifying this single "target platform" is thus pushed to build time of the compiler. The root cause of this is that the compiler (which will be run on the host) and the standard library/runtime (which will be run on the target) are built by a single build process.
+
+: There is no fundamental need to think about a single target ahead of time like this. If the tool supports modular or pluggable backends, both the need to specify the target at build time and the constraint of having only a single target disappear. An example of such a tool is LLVM.
+
+: Although the existence of a "target platform" is arguably a historical mistake, it is a common one: examples of tools that suffer from it are GCC, Binutils, GHC and Autoconf. Nixpkgs tries to avoid sharing in the mistake where possible. Still, because the concept of a target platform is so ingrained, it is best to support it as is.
+
+The exact schema these fields follow is a bit ill-defined due to a long and convoluted evolution, but this is slowly being cleaned up. You can see examples of ones used in practice in `lib.systems.examples`; note how they are not all very consistent. For now, here are a few fields you can count on them containing (see the example after this list):
+
+`system`
+
+: This is a two-component shorthand for the platform. Examples of this would be "x86_64-darwin" and "i686-linux"; see `lib.systems.doubles` for more. The first component corresponds to the CPU architecture of the platform and the second to the operating system of the platform (`[cpu]-[os]`). This format has built-in support in Nix, such as the `builtins.currentSystem` impure string.
+
+`config`
+
+: This is a 3- or 4-component shorthand for the platform. Examples of this would be `x86_64-unknown-linux-gnu` and `aarch64-apple-darwin14`. This is a standard format called the "LLVM target triple", as these triples were pioneered by LLVM. In the 4-part form, this corresponds to `[cpu]-[vendor]-[os]-[abi]`. This format is strictly more informative than the "Nix host double", as the previous format could analogously be termed. This needs a better name than `config`!
+
+`parsed`
+
+: This is a Nix representation of a parsed LLVM target triple with white-listed components. This can be specified directly, or actually parsed from the `config`. See `lib.systems.parse` for the exact representation.
+
+`libc`
+
+: This is a string identifying the standard C library used. Valid identifiers include "glibc" for GNU libc, "libSystem" for Darwin's Libsystem, and "uclibc" for µClibc. It should probably be refactored to use the module system, like `parse`.
+
+`is*`
+
+: These predicates are defined in `lib.systems.inspect`, and slapped onto every platform. They are superior to the ones in `stdenv` as they force the user to be explicit about which platform they are inspecting. Please use these instead of those.
+
+`platform`
+
+: This is, quite frankly, a dumping ground of ad-hoc settings (it's an attribute set). See `lib.systems.platforms` for examples—there's hopefully one in there that will work verbatim for each platform that is working. Please help us triage these flags and give them better homes!
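+
+As a rough sketch, here is how a few of the fields described above might be read off the host platform (assuming a derivation that takes `stdenv` as an argument; the values in the comments are only illustrative):
+
+```nix
+{ stdenv, ... }: {
+  # e.g. "x86_64-linux", "x86_64-unknown-linux-gnu" and "glibc" on a common Linux machine
+  inherit (stdenv.hostPlatform) system config libc;
+  # one of the `is*` predicates provided by `lib.systems.inspect`
+  forLinux = stdenv.hostPlatform.isLinux;
+}
+```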
+
+### Theory of dependency categorization {#ssec-cross-dependency-categorization}
+
+::: {.note}
+This is a rather philosophical description that isn't very Nixpkgs-specific. For an overview of all the relevant attributes given to `mkDerivation`, see [](#ssec-stdenv-dependencies). For a description of how everything is implemented, see [](#ssec-cross-dependency-implementation).
+:::
+
+In this section we explore the relationship between both runtime and build-time dependencies and the 3 Autoconf platforms.
+
+A run time dependency between two packages requires that their host platforms match. This is directly implied by the meaning of "host platform" and "runtime dependency": The package dependency exists while both packages are running on a single host platform.
+
+A build time dependency, however, has a shift in platforms between the depending package and the depended-on package. "build time dependency" means that to build the depending package we need to be able to run the depended-on's package. The depending package's build platform is therefore equal to the depended-on package's host platform.
+
+If both the dependency and depending packages aren't compilers or other machine-code-producing tools, we're done. And indeed `buildInputs` and `nativeBuildInputs` have covered these simpler cases for many years. But if the dependency does produce machine code, we might need to worry about its target platform too. In principle, that target platform might be any of the depending package's build, host, or target platforms, but we prohibit dependencies from a "later" platform to an earlier platform to limit confusion because we've never seen a legitimate use for them.
+
+Finally, if the depending package is a compiler or other machine-code-producing tool, it might need dependencies that run at "emit time". This is for compilers that (regrettably) insist on being built together with their source languages' standard libraries. Assuming build != host != target, a run-time dependency of the standard library cannot be run at the compiler's build time or run time, but only at the run time of code emitted by the compiler.
+
+Putting this all together, that means that we have dependency types of the form "X→ E", which means that the dependency executes on X and emits code for E; each of X and E can be `build`, `host`, or `target`, and E can be `*` to indicate that the dependency is not a compiler-like package.
+
+Dependency types describe the relationships that a package has with each of its transitive dependencies.  You could think of attaching one or more dependency types to each of the formal parameters at the top of a package's `.nix` file, as well as to all of *their* formal parameters, and so on.   Triples like `(foo, bar, baz)`, on the other hand, are a property of an instantiated derivation -- you would attach a triple `(mips-linux, mips-linux, sparc-solaris)` to a `.drv` file in `/nix/store`.
+
+Only nine dependency types matter in practice:
+
+#### Possible dependency types {#possible-dependency-types}
+
+| Dependency type | Dependency’s host platform | Dependency’s target platform |
+|-----------------|----------------------------|------------------------------|
+| build → *       | build                      | (none)                       |
+| build → build   | build                      | build                        |
+| build → host    | build                      | host                         |
+| build → target  | build                      | target                       |
+| host → *        | host                       | (none)                       |
+| host → host     | host                       | host                         |
+| host → target   | host                       | target                       |
+| target → *      | target                     | (none)                       |
+| target → target | target                     | target                       |
+
+Let's use `g++` as an example to make this table clearer.  `g++` is a C++ compiler written in C.  Suppose we are building `g++` with a `(build, host, target)` platform triple of `(foo, bar, baz)`.  This means we are using a `foo`-machine to build a copy of `g++` which will run on a `bar`-machine and emit binaries for the `baz`-machine.
+
+* `g++` links against the host platform's `glibc` C library, which is a "host→ *" dependency with a triple of `(bar, bar, *)`.  Since it is a library, not a compiler, it has no "target".
+
+* Since `g++` is written in C, the `gcc` compiler used to compile it is a "build→ host" dependency of `g++` with a triple of `(foo, foo, bar)`.  This compiler runs on the build platform and emits code for the host platform.
+
+* `gcc` links against the build platform's `glibc` C library, which is a "build→ *" dependency with a triple of `(foo, foo, *)`.  Since it is a library, not a compiler, it has no "target".
+
+* This `gcc` is itself compiled by an *earlier* copy of `gcc`.  This earlier copy of `gcc` is a "build→ build" dependency of `g++` with a triple of `(foo, foo, foo)`.  This "early `gcc`" runs on the build platform and emits code for the build platform.
+
+* `g++` is bundled with `libgcc`, which includes a collection of target-machine routines for exception handling and
+software floating point emulation.  `libgcc` would be a "target→ *" dependency with triple `(foo, baz, *)`, because it consists of machine code which gets linked against the output of the compiler that we are building.  It is a library, not a compiler, so it has no target of its own.
+
+* `libgcc` is written in C and compiled with `gcc`.  The `gcc` that compiles it will be a "build→ target" dependency with triple `(foo, foo, baz)`.  It gets compiled *and run* at `g++`-build-time (on platform `foo`), but must emit code for the `baz`-platform.
+
+* `g++` allows inline assembler code, so it depends on access to a copy of the `gas` assembler.  This would be a "host→ target" dependency with triple `(foo, bar, baz)`.
+
+* `g++` (and `gcc`) include a library `libgccjit.so`, which wrap the compiler in a library to create a just-in-time compiler.  In nixpkgs, this library is in the `libgccjit` package; if C++ required that programs have access to a JIT, `g++` would need to add a "target→ target" dependency for `libgccjit` with triple `(foo, baz, baz)`.  This would ensure that the compiler ships with a copy of `libgccjit` which both executes on and generates code for the `baz`-platform.
+
+* If `g++` itself linked against `libgccjit.so` (for example, to allow compile-time-evaluated C++ expressions), then the `libgccjit` package used to provide this functionality would be a "host→ host" dependency of `g++`: it is code which runs on the `host` and emits code for execution on the `host`.
+
+### Cross packaging cookbook {#ssec-cross-cookbook}
+
+Some frequently encountered problems when packaging for cross-compilation should be answered here. Ideally, the information above is exhaustive, so this section cannot provide any new information, but it is ludicrous and cruel to expect everyone to spend effort working through the interaction of many features just to figure out the same answer to the same common problem. Feel free to add to this list!
+
+#### My package fails to find a binutils command (`cc`/`ar`/`ld` etc.) {#cross-qa-fails-to-find-binutils}
+Many packages assume that an unprefixed binutils (`cc`/`ar`/`ld` etc.) is available, but Nix doesn't provide one. It only provides a prefixed one, just as it only does for all the other binutils programs. It may be necessary to patch the package to fix the build system to use a prefix. For instance, instead of `cc`, use `${stdenv.cc.targetPrefix}cc`.
+
+```nix
+{
+  makeFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ];
+}
+```
+
+#### How do I avoid compiling a GCC cross-compiler from source? {#cross-qa-avoid-compiling-gcc-cross-compiler}
+On less powerful machines, it can be inconvenient to cross-compile a package only to find out that GCC has to be compiled from source, which could take up to several hours. Nixpkgs maintains a limited [cross-related jobset on Hydra](https://hydra.nixos.org/jobset/nixpkgs/cross-trunk), which tests cross-compilation to various platforms from build platforms "x86\_64-darwin", "x86\_64-linux", and "aarch64-linux".  See `pkgs/top-level/release-cross.nix` for the full list of target platforms and packages.  For instance, the following invocation fetches the pre-built cross-compiled GCC for `armv6l-unknown-linux-gnueabihf` and builds GNU Hello from source.
+
+```ShellSession
+$ nix-build '<nixpkgs>' -A pkgsCross.raspberryPi.hello
+```
+
+#### What if my package’s build system needs to build a C program to be run under the build environment? {#cross-qa-build-c-program-in-build-environment}
+
+Add the following to your `mkDerivation` invocation.
+
+```nix
+{
+  depsBuildBuild = [ buildPackages.stdenv.cc ];
+}
+```
+
+#### My package’s testsuite needs to run host platform code. {#cross-testsuite-runs-host-code}
+
+Add the following to your `mkDerivation` invocation.
+
+```nix
+{
+  doCheck = stdenv.buildPlatform.canExecute stdenv.hostPlatform;
+}
+```
+
+#### Package using Meson needs to run binaries for the host platform during build. {#cross-meson-runs-host-code}
+
+Add `mesonEmulatorHook` to `nativeBuildInputs` conditionally, depending on whether the binaries built for the host platform can be executed on the build platform. For example:
+
+```nix
+{
+  nativeBuildInputs = [
+    meson
+  ] ++ lib.optionals (!stdenv.buildPlatform.canExecute stdenv.hostPlatform) [
+    mesonEmulatorHook
+  ];
+}
+```
+
+An example of an error which this fixes:
+
+`[Errno 8] Exec format error: './gdk3-scan'`
+
+## Cross-building packages {#sec-cross-usage}
+
+Nixpkgs can be instantiated with `localSystem` alone, in which case there is no cross-compiling and everything is built by and for that system, or also with `crossSystem`, in which case packages run on the latter, but all building happens on the former. Both parameters take the same schema as the 3 (build, host, and target) platforms defined in the previous section. As mentioned above, `lib.systems.examples` has some platforms which are used as arguments for these parameters in practice. You can use them programmatically, or on the command line:
+
+```ShellSession
+$ nix-build '<nixpkgs>' --arg crossSystem '(import <nixpkgs/lib>).systems.examples.fooBarBaz' -A whatever
+```
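+
+Programmatically, a minimal sketch could look like this (assuming the `aarch64-multiplatform` entry from `lib.systems.examples`; any other example platform works the same way):
+
+```nix
+let
+  pkgsCross = import <nixpkgs> {
+    crossSystem = (import <nixpkgs/lib>).systems.examples.aarch64-multiplatform;
+  };
+in
+  # built on the local system, produces a binary for the cross platform
+  pkgsCross.hello
+```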
+
+::: {.note}
+Eventually we would like to make these platform examples an unnecessary convenience so that
+
+```ShellSession
+$ nix-build '<nixpkgs>' --arg crossSystem '{ config = "<arch>-<os>-<vendor>-<abi>"; }' -A whatever
+```
+
+works in the vast majority of cases. The problem today is dependencies on other sorts of configuration which aren't given proper defaults. We rely on the examples to crudely set those configuration parameters in some vaguely sane manner on the user's behalf. Issue [\#34274](https://github.com/NixOS/nixpkgs/issues/34274) tracks this inconvenience along with its root cause in crufty configuration options.
+:::
+
+While one is free to pass both parameters in full, there's a lot of logic to fill in missing fields. As discussed in the previous section, only one of `system`, `config`, and `parsed` is needed to infer the other two. Additionally, `libc` will be inferred from `parsed`. Finally, `localSystem.system` is also _impurely_ inferred based on the platform on which evaluation occurs. This means it is often not necessary to pass `localSystem` at all, as in the command-line example in the previous paragraph.
+
+::: {.note}
+Many sources (manual, wiki, etc) probably mention passing `system`, `platform`, along with the optional `crossSystem` to Nixpkgs: `import <nixpkgs> { system = ..; platform = ..; crossSystem = ..; }`. Passing those two instead of `localSystem` is still supported for compatibility, but is discouraged. Indeed, much of the inference we do for these parameters is motivated by compatibility as much as convenience.
+:::
+
+One would think that `localSystem` and `crossSystem` overlap horribly with the three `*Platforms` (`buildPlatform`, `hostPlatform`, and `targetPlatform`; see `stage.nix` or the manual). Actually, those identifiers are purposefully not used here to draw a subtle but important distinction: While the granularity of having 3 platforms is necessary to properly *build* packages, it is overkill for specifying the user's *intent* when making a build plan or package set. A simple "build vs deploy" dichotomy is adequate: the sliding window principle described in the previous section shows how to interpolate between these two "end points" to get the 3 platform triple for each bootstrapping stage. That means that for any package in a given package set, even those not bound on the top level but only reachable via dependencies or `buildPackages`, the three platforms will be defined as one of `localSystem` or `crossSystem`, with the former replacing the latter as one traverses build-time dependencies. A last simple difference is that `crossSystem` should be null when one doesn't want to cross-compile, while the `*Platform`s are always non-null. `localSystem` is always non-null.
+
+## Cross-compilation infrastructure {#sec-cross-infra}
+
+### Implementation of dependencies {#ssec-cross-dependency-implementation}
+
+The categories of dependencies developed in [](#ssec-cross-dependency-categorization) are specified as lists of derivations given to `mkDerivation`, as documented in [](#ssec-stdenv-dependencies). In short, each list of dependencies for "host → target" is called `deps<host><target>` (where the `host` and `target` values are each one of `build`, `host`, or `target`), with the exceptions for backwards compatibility that `depsBuildHost` is instead called `nativeBuildInputs` and `depsHostTarget` is instead called `buildInputs`. Nixpkgs is now structured so that each `deps<host><target>` is automatically taken from `pkgs<host><target>`. (These `pkgs<host><target>`s are quite new, so there is no special case for `nativeBuildInputs` and `buildInputs`.) For example, `pkgsBuildHost.gcc` should be used at build-time, while `pkgsHostTarget.gcc` should be used at run-time.
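+
+For illustration, a rough sketch of how the commonly used lists map onto this naming scheme (the package and its dependencies are hypothetical, and assumed to be in scope via `callPackage`):
+
+```nix
+stdenv.mkDerivation {
+  pname = "example";
+  version = "0.1";
+  # build → build: runs on the build platform and emits code for the build platform
+  depsBuildBuild = [ buildPackages.stdenv.cc ];
+  # build → host, named nativeBuildInputs for backwards compatibility
+  nativeBuildInputs = [ cmake ];
+  # host → target, named buildInputs for backwards compatibility
+  buildInputs = [ openssl ];
+}
+```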
+
+Now, for most of Nixpkgs's history, there were no `pkgs<host><target>` attributes, and most packages have not been refactored to use them explicitly. Prior to those, there were just `buildPackages`, `pkgs`, and `targetPackages`. Those are now redefined as aliases to `pkgsBuildHost`, `pkgsHostTarget`, and `pkgsTargetTarget`. It is acceptable, even recommended, to use them for libraries to show that the host platform is irrelevant.
+
+But before that, there was just `pkgs`, even though both `buildInputs` and `nativeBuildInputs` existed. \[Cross barely worked, and those were implemented with some hacks on `mkDerivation` to override dependencies.\] What this means is that the vast majority of packages do not use any explicit package set to populate their dependencies, instead just using whatever `callPackage` gives them, even if they do correctly sort their dependencies into the multiple lists described above. And indeed, asking that users both sort their dependencies, _and_ take them from the right attribute set, is both too onerous and redundant, so the recommended approach (for now) is to continue just categorizing by list and not using an explicit package set.
+
+To make this work, we "splice" together the six `pkgsFooBar` package sets and have `callPackage` actually take its arguments from that. This is currently implemented in `pkgs/top-level/splice.nix`. `mkDerivation` then, for each dependency attribute, pulls the right derivation out from the splice. This splicing can be skipped when not cross-compiling as the package sets are the same, but still is a bit slow for cross-compiling. We'd like to do something better, but haven't come up with anything yet.
+
+### Bootstrapping {#ssec-bootstrapping}
+
+Each of the package sets described above comes from a single bootstrapping stage. While `pkgs/top-level/default.nix` coordinates the composition of stages at a high level, `pkgs/top-level/stage.nix` "ties the knot" (creates the fixed point) of each stage. The package sets are defined per-stage however, so they can be thought of as edges between stages (the nodes) in a graph. Compositions like `pkgsBuildTarget.targetPackages` can be thought of as paths in this graph.
+
+While there are many package sets, and thus many edges, the stages can also be arranged in a linear chain. In other words, many of the edges are redundant as far as connectivity is concerned. This hinges on the type of bootstrapping we do. Currently for cross it is:
+
+1.  `(native, native, native)`
+
+2.  `(native, native, foreign)`
+
+3.  `(native, foreign, foreign)`
+
+In each stage, `pkgsBuildHost` refers to the previous stage, `pkgsBuildBuild` refers to the one before that, `pkgsHostTarget` refers to the current one, and `pkgsTargetTarget` refers to the next one. When there is no previous or next stage, they instead refer to the current stage. Note how all the invariants regarding the mapping between dependency and depending packages' build, host, and target platforms are preserved. `pkgsBuildTarget` and `pkgsHostHost` are more complex in that the stage fitting the requirements isn't always a fixed chain of "prevs" and "nexts" away (modulo the "saturating" self-references at the ends). We just special case each instead. All the primary edges are implemented in `pkgs/stdenv/booter.nix`, and the secondary aliases in `pkgs/top-level/stage.nix`.
+
+::: {.note}
+The native stages are bootstrapped in legacy ways that predate the current cross implementation. This is why the bootstrapping stages leading up to the final stages are ignored in the previous paragraph.
+:::
+
+If one looks at the 3 platform triples, one can see that they overlap such that one could put them together into a chain like:
+```
+(native, native, native, foreign, foreign)
+```
+
+If one imagines the saturating self references at the end being replaced with infinite stages, and then overlays those platform triples, one ends up with the infinite tuple:
+```
+(native..., native, native, native, foreign, foreign, foreign...)
+```
+One can then imagine any sequence of platforms such that there are bootstrap stages with their 3 platforms determined by "sliding a window" that is the 3 tuple through the sequence. This was the original model for bootstrapping. Without a target platform (assume a better world where all compilers are multi-target and all standard libraries are built in their own derivation), this is sufficient. Conversely, if one wishes to cross compile "faster", with a "Canadian Cross" bootstrapping stage where `build != host != target`, more bootstrapping stages are needed, since no sliding window provides the pesky `pkgsBuildTarget` package set, as it skips the Canadian cross stage's "host".
+
+
+::: {.note}
+It is much better to refer to `buildPackages` than `targetPackages`, or more broadly package sets that do not mention “target”. There are three reasons for this.
+
+First, it is because bootstrapping stages do not have a unique `targetPackages`. For example a `(x86-linux, x86-linux, arm-linux)` and `(x86-linux, x86-linux, x86-windows)` package set both have a `(x86-linux, x86-linux, x86-linux)` package set. Because there is no canonical `targetPackages` for such a native (`build == host == target`) package set, we set their `targetPackages`
+
+Second, it is because this is a frequent source of hard-to-follow "infinite recursions" / cycles. When only package sets that don't mention target are used, the package set forms a directed acyclic graph. This means that all cycles that exist are confined to one stage. This means they are a lot smaller, and easier to follow in the code or a backtrace. It also means they are present in native and cross builds alike, and so more likely to be caught by CI and other users.
+
+Third, it is because everything target-mentioning only exists to accommodate compilers with lousy build systems that insist on the compiler itself and standard library being built together. Of course that is bad because bigger derivations mean longer rebuilds. It is also problematic because it tends to make the standard libraries less like other libraries than they could be, complicating code and build systems alike. Because of the other problems, and because of these innate disadvantages, compilers ought to be packaged another way where possible.
+:::
+
+::: {.note}
+If one explores Nixpkgs, they will see derivations with names like `gccCross`. Such `*Cross` derivations are a holdover from before we properly distinguished between the host and target platforms—the derivation with “Cross” in the name covered the `build = host != target` case, while the other covered the `host = target` case, with the build platform the same or not based on whether one was using its `.__spliced.buildHost` or `.__spliced.hostTarget`.
+:::
diff --git a/nixpkgs/doc/stdenv/meta.chapter.md b/nixpkgs/doc/stdenv/meta.chapter.md
new file mode 100644
index 000000000000..7f57eda791ea
--- /dev/null
+++ b/nixpkgs/doc/stdenv/meta.chapter.md
@@ -0,0 +1,286 @@
+# Meta-attributes {#chap-meta}
+
+Nix packages can declare *meta-attributes* that contain information about a package such as a description, its homepage, its license, and so on. For instance, the GNU Hello package has a `meta` declaration like this:
+
+```nix
+{
+  meta = {
+    description = "A program that produces a familiar, friendly greeting";
+    longDescription = ''
+      GNU Hello is a program that prints "Hello, world!" when you run it.
+      It is fully customizable.
+    '';
+    homepage = "https://www.gnu.org/software/hello/manual/";
+    license = lib.licenses.gpl3Plus;
+    maintainers = with lib.maintainers; [ eelco ];
+    platforms = lib.platforms.all;
+  };
+}
+```
+
+Meta-attributes are not passed to the builder of the package. Thus, a change to a meta-attribute doesn’t trigger a recompilation of the package.
+
+## Standard meta-attributes {#sec-standard-meta-attributes}
+
+It is expected that each meta-attribute is one of the following:
+
+### `description` {#var-meta-description}
+
+A short (one-line) description of the package.
+This is displayed on [search.nixos.org](https://search.nixos.org/packages).
+
+Don’t include a period at the end. Don’t include newline characters. Capitalise the first character. For brevity, don’t repeat the name of the package --- just describe what it does.
+
+Wrong: `"libpng is a library that allows you to decode PNG images."`
+
+Right: `"A library for decoding PNG images"`
+
+### `longDescription` {#var-meta-longDescription}
+
+An arbitrarily long description of the package in [CommonMark](https://commonmark.org) Markdown.
+
+### `branch` {#var-meta-branch}
+
+Release branch. Used to specify that a package is not going to receive updates that are not in this branch; for example, Linux kernel 3.0 is supposed to be updated to 3.0.X, not 3.1.
+
+### `homepage` {#var-meta-homepage}
+
+The package’s homepage. Example: `https://www.gnu.org/software/hello/manual/`
+
+### `downloadPage` {#var-meta-downloadPage}
+
+The page where a link to the current version can be found. Example: `https://ftp.gnu.org/gnu/hello/`
+
+### `changelog` {#var-meta-changelog}
+
+A link or a list of links to the location of Changelog for a package. A link may use expansion to refer to the correct changelog version. Example: `"https://git.savannah.gnu.org/cgit/hello.git/plain/NEWS?h=v${version}"`
+
+### `license` {#var-meta-license}
+
+The license, or licenses, for the package. One from the attribute set defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix). At the moment, both a single license and a list of licenses are valid. If the license field is in the form of a list representation, then it means that parts of the package are licensed differently. Each license should preferably be referenced by its attribute. The non-list attribute value can also be a space delimited string representation of the contained attribute `shortNames` or `spdxIds`. The following are all valid examples:
+
+- Single license referenced by attribute (preferred) `lib.licenses.gpl3Only`.
+- Single license referenced by its attribute shortName (frowned upon) `"gpl3Only"`.
+- Single license referenced by its attribute spdxId (frowned upon) `"GPL-3.0-only"`.
+- Multiple licenses referenced by attribute (preferred) `with lib.licenses; [ asl20 free ofl ]`.
+- Multiple licenses referenced as a space delimited string of attribute shortNames (frowned upon) `"asl20 free ofl"`.
+
+For details, see [Licenses](#sec-meta-license).
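+
+As a rough sketch, a package whose parts are covered by the Apache License 2.0 and the SIL OFL might declare (attribute names taken from `lib.licenses`):
+
+```nix
+{
+  meta.license = with lib.licenses; [ asl20 ofl ];
+}
+```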
+
+### `maintainers` {#var-meta-maintainers}
+
+A list of the maintainers of this Nix expression. Maintainers are defined in [`nixpkgs/maintainers/maintainer-list.nix`](https://github.com/NixOS/nixpkgs/blob/master/maintainers/maintainer-list.nix). There is no restriction to becoming a maintainer, just add yourself to that list in a separate commit titled “maintainers: add alice” in the same pull request, and reference maintainers with `maintainers = with lib.maintainers; [ alice bob ]`.
+
+### `mainProgram` {#var-meta-mainProgram}
+
+The name of the main binary for the package. This affects the binary `nix run` executes. Example: `"rg"`
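+
+For instance, a package whose main executable is `rg` (as in the example above) would declare:
+
+```nix
+{
+  meta.mainProgram = "rg";
+}
+```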
+
+### `priority` {#var-meta-priority}
+
+The *priority* of the package, used by `nix-env` to resolve file name conflicts between packages. See the [manual page for `nix-env`](https://nixos.org/manual/nix/stable/command-ref/nix-env) for details. Example: `"10"` (a low-priority package).
+
+### `platforms` {#var-meta-platforms}
+
+The list of Nix platform types on which the package is supported. Hydra builds packages according to the platform specified. If no platform is specified, the package does not have prebuilt binaries. An example is:
+
+```nix
+{
+  meta.platforms = lib.platforms.linux;
+}
+```
+
+The attribute set `lib.platforms` defines [various common lists](https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix) of platform types.
+
+### `badPlatforms` {#var-meta-badPlatforms}
+
+The list of Nix [platform types](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/lib/meta.nix#L75-L81) on which the package is known not to be buildable.
+Hydra will never create prebuilt binaries for these platform types, even if they are in [`meta.platforms`](#var-meta-platforms).
+In general it is preferable to set `meta.platforms = lib.platforms.all` and then exclude any platforms on which the package is known not to build.
+For example, a package which requires dynamic linking and cannot be linked statically could use this:
+
+```nix
+{
+  meta.platforms = lib.platforms.all;
+  meta.badPlatforms = [ lib.systems.inspect.patterns.isStatic ];
+}
+```
+
+The [`lib.meta.availableOn`](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/lib/meta.nix#L95-L106) function can be used to test whether or not a package is available (i.e. buildable) on a given platform.
+Some packages use this to automatically detect the maximum set of features with which they can be built.
+For example, `systemd` [requires dynamic linking](https://github.com/systemd/systemd/issues/20600#issuecomment-912338965), and [has a `meta.badPlatforms` setting](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/pkgs/os-specific/linux/systemd/default.nix#L752) similar to the one above.
+Packages which can be built with or without `systemd` support will use `lib.meta.availableOn` to detect whether or not `systemd` is available on the [`hostPlatform`](#ssec-cross-platform-parameters) for which they are being built; if it is not available (e.g. due to a statically-linked host platform like `pkgsStatic`) this support will be disabled by default.
+
+### `tests` {#var-meta-tests}
+
+::: {.warning}
+This attribute is special in that it is not actually under the `meta` attribute set but rather under the `passthru` attribute set. This is due to how `meta` attributes work, and the fact that they are supposed to contain only metadata, not derivations.
+:::
+
+An attribute set with tests as values. A test is a derivation that builds when the test passes and fails to build otherwise.
+
+You can run these tests with:
+
+```ShellSession
+$ cd path/to/nixpkgs
+$ nix-build -A your-package.tests
+```
+
+#### Package tests {#var-meta-tests-packages}
+
+Tests that are part of the source package are often executed in the `installCheckPhase`.
+
+Prefer `passthru.tests` for tests that are introduced in nixpkgs because:
+
+* `passthru.tests` tests the 'real' package, independently from the environment in which it was built
+* we can run `passthru.tests` independently
+* `installCheckPhase` adds overhead to each build
+
+For more on how to write and run package tests, see [](#sec-package-tests).
+
+#### NixOS tests {#var-meta-tests-nixos}
+
+The NixOS tests are available as `nixosTests` in parameters of derivations. For instance, the OpenSMTPD derivation includes lines similar to:
+
+```nix
+{ /* ... , */ nixosTests }:
+{
+  # ...
+  passthru.tests = {
+    basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
+  };
+}
+```
+
+NixOS tests run in a VM, so they are slower than regular package tests. For more information see [NixOS module tests](https://nixos.org/manual/nixos/stable/#sec-nixos-tests).
+
+Alternatively, you can specify other derivations as tests. You can make use of
+the optional function argument to `mkDerivation` to inject the correct package without
+relying on non-local definitions, even in the presence of `overrideAttrs`.
+Here that's `finalAttrs.finalPackage`, but you could choose a different name if
+`finalAttrs` already exists in your scope.
+
+`(mypkg.overrideAttrs f).passthru.tests` will be as expected, as long as the
+definition of `tests` does not rely on the original `mypkg` or overrides it in
+all places.
+
+```nix
+# my-package/default.nix
+{ stdenv, callPackage }:
+stdenv.mkDerivation (finalAttrs: {
+  # ...
+  passthru.tests.example = callPackage ./example.nix { my-package = finalAttrs.finalPackage; };
+})
+```
+
+```nix
+# my-package/example.nix
+{ runCommand, lib, my-package, ... }:
+runCommand "my-package-test" {
+  nativeBuildInputs = [ my-package ];
+  src = lib.sources.sourcesByRegex ./. [ ".*.in" ".*.expected" ];
+} ''
+  my-package --help
+  my-package <example.in >example.actual
+  diff -U3 --color=auto example.expected example.actual
+  mkdir $out
+''
+```
+
+
+### `timeout` {#var-meta-timeout}
+
+A timeout (in seconds) for building the derivation. If the derivation takes longer than this time to build, Hydra will fail it for exceeding the timeout. However, not all computers have the same computing power, hence some builders may decide to apply a multiplicative factor to this value. When filling this value in, try to keep it approximately consistent with other values already present in `nixpkgs`.
+
+`meta` attributes are not stored in the instantiated derivation.
+Therefore, this setting may be lost when the package is used as a dependency.
+To be effective, it must be presented directly to an evaluation process that handles the `meta.timeout` attribute.
+
+### `hydraPlatforms` {#var-meta-hydraPlatforms}
+
+The list of Nix platform types for which the [Hydra](https://github.com/nixos/hydra) [instance at `hydra.nixos.org`](https://nixos.org/hydra) will build the package. (Hydra is the Nix-based continuous build system.) It defaults to the value of `meta.platforms`. Thus, the only reason to set `meta.hydraPlatforms` is if you want `hydra.nixos.org` to build the package on a subset of `meta.platforms`, or not at all, e.g.
+
+```nix
+{
+  meta.platforms = lib.platforms.linux;
+  meta.hydraPlatforms = [];
+}
+```
+
+### `broken` {#var-meta-broken}
+
+If set to `true`, the package is marked as "broken", meaning that it won’t show up in [search.nixos.org](https://search.nixos.org/packages), and cannot be built or installed unless the environment variable [`NIXPKGS_ALLOW_BROKEN`](#opt-allowBroken) is set.
+Such unconditionally-broken packages should be removed from Nixpkgs eventually unless they are fixed.
+
+The value of this attribute can depend on a package's arguments, including `stdenv`.
+This means that `broken` can be used to express constraints, for example:
+
+- Does not cross compile
+
+  ```nix
+  {
+    meta.broken = !(stdenv.buildPlatform.canExecute stdenv.hostPlatform);
+  }
+  ```
+
+- Broken if all of a certain set of its dependencies are broken
+
+  ```nix
+  {
+    meta.broken = lib.all (p: p.meta.broken) [ glibc musl ];
+  }
+  ```
+
+This makes `broken` strictly more powerful than `meta.badPlatforms`.
+However `meta.availableOn` currently examines only `meta.platforms` and `meta.badPlatforms`, so `meta.broken` does not influence the default values for optional dependencies.
+
+## Licenses {#sec-meta-license}
+
+The `meta.license` attribute should preferably contain a value from `lib.licenses` defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix), or in-place license description of the same format if the license is unlikely to be useful in another expression.
+
+Although it’s typically better to indicate the specific license, a few generic options are available:
+
+### `lib.licenses.free`, `"free"` {#lib.licenses.free-free}
+
+Catch-all for free software licenses not listed above.
+
+### `lib.licenses.unfreeRedistributable`, `"unfree-redistributable"` {#lib.licenses.unfreeredistributable-unfree-redistributable}
+
+Unfree package that can be redistributed in binary form. That is, it’s legal to redistribute the *output* of the derivation. This means that the package can be included in the Nixpkgs channel.
+
+Sometimes proprietary software can only be redistributed unmodified. Make sure the builder doesn’t actually modify the original binaries; otherwise we’re breaking the license. For instance, the NVIDIA X11 drivers can be redistributed unmodified, but our builder applies `patchelf` to make them work. Thus, its license is `"unfree"` and it cannot be included in the Nixpkgs channel.
+
+### `lib.licenses.unfree`, `"unfree"` {#lib.licenses.unfree-unfree}
+
+Unfree package that cannot be redistributed. You can build it yourself, but you cannot redistribute the output of the derivation. Thus it cannot be included in the Nixpkgs channel.
+
+### `lib.licenses.unfreeRedistributableFirmware`, `"unfree-redistributable-firmware"` {#lib.licenses.unfreeredistributablefirmware-unfree-redistributable-firmware}
+
+This package supplies unfree, redistributable firmware. This is a separate value from `unfree-redistributable` because not everybody cares whether firmware is free.
+
+## Source provenance {#sec-meta-sourceProvenance}
+
+The value of a package's `meta.sourceProvenance` attribute specifies the provenance of the package's derivation outputs.
+
+If a package contains elements that are not built from the original source by a nixpkgs derivation, the `meta.sourceProvenance` attribute should be a list containing one or more value from `lib.sourceTypes` defined in [`nixpkgs/lib/source-types.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/source-types.nix).
+
+Adding this information helps users who have needs related to build transparency and supply-chain security to gain some visibility into their installed software or set policy to allow or disallow installation based on source provenance.
+
+The presence of a particular `sourceType` in a package's `meta.sourceProvenance` list indicates that the package contains some components falling into that category, though the *absence* of that `sourceType` does not *guarantee* the absence of that category of `sourceType` in the package's contents. A package with no `meta.sourceProvenance` set implies it has no *known* `sourceType`s other than `fromSource`.
+
+The meaning of the `meta.sourceProvenance` attribute does not depend on the value of the `meta.license` attribute.
+
+### `lib.sourceTypes.fromSource` {#lib.sourceTypes.fromSource}
+
+Package elements which are produced by a nixpkgs derivation which builds them from source code.
+
+### `lib.sourceTypes.binaryNativeCode` {#lib.sourceTypes.binaryNativeCode}
+
+Native code to be executed on the target system's CPU, built by a third party. This includes packages which wrap a downloaded AppImage or Debian package.
+
+### `lib.sourceTypes.binaryFirmware` {#lib.sourceTypes.binaryFirmware}
+
+Code to be executed on a peripheral device or embedded controller, built by a third party.
+
+### `lib.sourceTypes.binaryBytecode` {#lib.sourceTypes.binaryBytecode}
+
+Code to run on a VM interpreter or JIT compiled into bytecode by a third party. This includes packages which download Java `.jar` files from another source.
diff --git a/nixpkgs/doc/stdenv/multiple-output.chapter.md b/nixpkgs/doc/stdenv/multiple-output.chapter.md
new file mode 100644
index 000000000000..5e86d2aa3d56
--- /dev/null
+++ b/nixpkgs/doc/stdenv/multiple-output.chapter.md
@@ -0,0 +1,96 @@
+# Multiple-output packages {#chap-multiple-output}
+
+The Nix language allows a derivation to produce multiple outputs, which is similar to what is utilized by other Linux distribution packaging systems. The outputs reside in separate Nix store paths, so they can be mostly handled independently of each other, including passing to build inputs, garbage collection or binary substitution. The exception is that building from source always produces all the outputs.
+
+The main motivation is to save disk space by reducing runtime closure sizes; consequently also sizes of substituted binaries get reduced. Splitting can be used to have more granular runtime dependencies, for example the typical reduction is to split away development-only files, as those are typically not needed during runtime. As a result, closure sizes of many packages can get reduced to a half or even much less.
+
+::: {.note}
+The reduction effects could be instead achieved by building the parts in completely separate derivations. That would often additionally reduce build-time closures, but it tends to be much harder to write such derivations, as build systems typically assume all parts are being built at once. This compromise approach of single source package producing multiple binary packages is also utilized often by rpm and deb.
+:::
+
+A number of attributes can be used to work with a derivation with multiple outputs.
+The attribute `outputs` is a list of strings, which are the names of the outputs.
+For each of these names, an identically named attribute is created, corresponding to that output.
+
+The attribute `meta.outputsToInstall` is used to determine the [default set of outputs to install](https://github.com/NixOS/nixpkgs/blob/08c3198f1c6fd89a09f8f0ea09b425028a34de3e/pkgs/stdenv/generic/check-meta.nix#L411-L426) when using the derivation name unqualified:
+`bin`, or `out`, or the first specified output; as well as `man` if that is specified.
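+
+As a rough sketch, a hypothetical package that should install its `bin` and `man` outputs by default could set:
+
+```nix
+{
+  outputs = [ "bin" "man" "dev" "out" ];
+  meta.outputsToInstall = [ "bin" "man" ];
+}
+```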
+
+## Using a split package {#sec-multiple-outputs-using-split-packages}
+
+In the Nix language the individual outputs can be reached explicitly as attributes, e.g. `coreutils.info`, but the typical case is just using packages as build inputs.
+
+When a multiple-output derivation gets into a build input of another derivation, the `dev` output is added if it exists, otherwise the first output is added. In addition to that, the outputs listed in that package's `propagatedBuildOutputs`, which by default contains `$outputBin` and `$outputLib`, are also added. (See [](#multiple-output-file-type-groups).)
+
+In some cases it may be desirable to combine different outputs under a single store path. A function `symlinkJoin` can be used to do this. (Note that it may negate some closure size benefits of using a multiple-output package.)
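+
+As a rough sketch (using a hypothetical package `somePkg` with `out` and `dev` outputs):
+
+```nix
+symlinkJoin {
+  name = "somePkg-combined";
+  paths = [ somePkg.out somePkg.dev ];
+}
+```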
+
+## Writing a split derivation {#sec-multiple-outputs-}
+
+This section describes how to write a derivation that produces multiple outputs.
+
+In nixpkgs there is a framework supporting multiple-output derivations. It tries to cover most cases with its default behavior. You can find the source in `<nixpkgs/pkgs/build-support/setup-hooks/multiple-outputs.sh>`; it’s relatively readable. The whole machinery is triggered by defining the `outputs` attribute to contain the list of desired output names (strings).
+
+```nix
+{
+  outputs = [ "bin" "dev" "out" "doc" ];
+}
+```
+
+Often such a single line is enough. For each output, an identically named environment variable is passed to the builder and contains the Nix store path for that output. Typically you also want to have the main `out` output, as it catches any files that didn’t end up in any other output.
+
+::: {.note}
+There is a special handling of the `debug` output, described at [](#stdenv-separateDebugInfo).
+:::
+
+### “Binaries first” {#multiple-output-file-binaries-first-convention}
+
+A commonly adopted convention in `nixpkgs` is that executables provided by the package are contained within its first output. This convention allows dependent packages to reference the executables provided by packages in a uniform manner. For instance, knowing that the `perl` package contains a `perl` executable, it can be referenced as `${pkgs.perl}/bin/perl` within a Nix derivation that needs to execute a Perl script.
+
+The `glibc` package is a deliberate single exception to the “binaries first” convention. The `glibc` package has `libs` as its first output, allowing the libraries provided by `glibc` to be referenced directly (e.g. `${glibc}/lib/ld-linux-x86-64.so.2`). The executables provided by `glibc` can be accessed via its `bin` attribute (e.g. `${lib.getBin stdenv.cc.libc}/bin/ldd`).
+
+The reason `glibc` deviates from the convention is that referencing a library provided by `glibc` is a very common operation among Nix packages. For instance, third-party executables packaged by Nix are typically patched and relinked with the relevant version of `glibc` libraries from Nix packages (please see the documentation on [patchelf](https://github.com/NixOS/patchelf) for more details).
+
+### File type groups {#multiple-output-file-type-groups}
+
+The support code currently recognizes some particular kinds of outputs and either instructs the build system of the package to put files into their desired outputs or it moves the files during the fixup phase. Each group of file types has an `outputFoo` variable specifying the output name where they should go. If that variable isn’t defined by the derivation writer, it is guessed – a default output name is defined, falling back to other possibilities if the output isn’t defined.
+
+#### `$outputDev` {#outputdev}
+
+is for development-only files. These include C(++) headers (`include/`), pkg-config (`lib/pkgconfig/`), cmake (`lib/cmake/`) and aclocal files (`share/aclocal/`). They go to `dev` or `out` by default.
+
+#### `$outputBin` {#outputbin}
+
+is meant for user-facing binaries, typically residing in `bin/`. They go to `bin` or `out` by default.
+
+#### `$outputLib` {#outputlib}
+
+is meant for libraries, typically residing in `lib/` and `libexec/`. They go to `lib` or `out` by default.
+
+#### `$outputDoc` {#outputdoc}
+
+is for user documentation, typically residing in `share/doc/`. It goes to `doc` or `out` by default.
+
+#### `$outputDevdoc` {#outputdevdoc}
+
+is for _developer_ documentation. Currently we count gtk-doc and devhelp books, typically residing in `share/gtk-doc/` and `share/devhelp/`, in there. It goes to `devdoc` or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
+
+#### `$outputMan` {#outputman}
+
+is for man pages (except for section 3), typically residing in `share/man/man[0-9]/`. They go to `man` or `$outputBin` by default.
+
+#### `$outputDevman` {#outputdevman}
+
+is for section 3 man pages, typically residing in `share/man/man3/`. They go to `devman` or `$outputMan` by default.
+
+#### `$outputInfo` {#outputinfo}
+
+is for info pages, typically residing in `share/info/`. They go to `info` or `$outputBin` by default.
+
+### Common caveats {#sec-multiple-outputs-caveats}
+
+- Some configure scripts don’t like some of the parameters passed by default by the framework, e.g. `--docdir=/foo/bar`. You can disable this by setting `setOutputFlags = false;`.
+
+- The outputs of a single derivation can retain references to each other, but note that circular references are not allowed. (And each strongly-connected component would act as a single output anyway.)
+
+- Most split packages contain their core functionality in libraries. These libraries tend to refer to various kinds of data that typically go into `out`, e.g. locale strings, so there is often no advantage in separating the libraries into `lib`, as keeping them in `out` is easier.
+
+- Some packages have hidden assumptions on install paths, which complicates splitting.
diff --git a/nixpkgs/doc/stdenv/platform-notes.chapter.md b/nixpkgs/doc/stdenv/platform-notes.chapter.md
new file mode 100644
index 000000000000..409c9f2e7b2e
--- /dev/null
+++ b/nixpkgs/doc/stdenv/platform-notes.chapter.md
@@ -0,0 +1,67 @@
+# Platform Notes {#chap-platform-notes}
+
+## Darwin (macOS) {#sec-darwin}
+
+Some common issues when packaging software for Darwin:
+
+- The Darwin `stdenv` uses clang instead of gcc. When referring to the compiler, `$CC` or `cc` will work in both cases. Some builds hardcode gcc/g++ in their build scripts; that can usually be fixed by using something like `makeFlags = [ "CC=cc" ];` or by patching the build scripts:
+
+  ```nix
+  stdenv.mkDerivation {
+    name = "libfoo-1.2.3";
+    # ...
+    buildPhase = ''
+      $CC -o hello hello.c
+    '';
+  }
+  ```
+
+- On Darwin, libraries are linked using absolute paths; they are resolved by their `install_name` at link time. Sometimes packages won’t set this correctly, causing the library lookups to fail at runtime. This can be fixed by adding extra linker flags or by running `install_name_tool -id` during the `fixupPhase`:
+
+  ```nix
+  stdenv.mkDerivation {
+    name = "libfoo-1.2.3";
+    # ...
+    makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
+  }
+  ```
+
+- Even if the libraries are linked using absolute paths and resolved via their `install_name` correctly, tests can sometimes fail to run binaries. This happens because the `checkPhase` runs before the libraries are installed.
+
+  This can usually be solved by running the tests after the `installPhase` or alternatively by using `DYLD_LIBRARY_PATH`. More information about this variable can be found in the *dyld(1)* manpage.
+
+  ```
+  dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib
+  Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq
+  Reason: image not found
+  ./tests/jqtest: line 5: 75779 Abort trap: 6
+  ```
+
+  ```nix
+  stdenv.mkDerivation {
+    name = "libfoo-1.2.3";
+    # ...
+    doInstallCheck = true;
+    installCheckTarget = "check";
+  }
+  ```
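+
+  Alternatively, a sketch of the `DYLD_LIBRARY_PATH` approach; the directory used here is hypothetical and depends on where the package builds its libraries:
+
+  ```nix
+  stdenv.mkDerivation {
+    name = "libfoo-1.2.3";
+    # ...
+    preCheck = ''
+      # point dyld at the not-yet-installed libraries in the build tree
+      export DYLD_LIBRARY_PATH=$PWD/lib
+    '';
+  }
+  ```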
+
+- Some packages assume Xcode is available and use `xcrun` to resolve build tools like `clang`. This causes errors like `xcode-select: error: no developer tools were found at '/Applications/Xcode.app'`, even though the build doesn’t actually depend on Xcode:
+
+  ```nix
+  stdenv.mkDerivation {
+    name = "libfoo-1.2.3";
+    # ...
+    prePatch = ''
+      substituteInPlace Makefile \
+          --replace-fail '/usr/bin/xcrun clang' clang
+    '';
+  }
+  ```
+
+  The package `xcbuild` can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues.
+
+- x86_64-darwin uses the 10.12 SDK by default, but some software is not compatible with that version of the SDK. In that case,
+  the 11.0 SDK used by aarch64-darwin is available for use on x86_64-darwin. To use it, reference `apple_sdk_11_0` instead of
+  `apple_sdk` in your derivation and use `pkgs.darwin.apple_sdk_11_0.callPackage` instead of `pkgs.callPackage`. On Linux, this will
+  have the same effect as `pkgs.callPackage`, so you can use `pkgs.darwin.apple_sdk_11_0.callPackage` regardless of platform.
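+
+  For example, a sketch of such a call site (the package name and path are hypothetical):
+
+  ```nix
+  {
+    my-package = darwin.apple_sdk_11_0.callPackage ../tools/my-package { };
+  }
+  ```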
diff --git a/nixpkgs/doc/stdenv/stdenv.chapter.md b/nixpkgs/doc/stdenv/stdenv.chapter.md
new file mode 100644
index 000000000000..a1e27b7bdf7f
--- /dev/null
+++ b/nixpkgs/doc/stdenv/stdenv.chapter.md
@@ -0,0 +1,1664 @@
+# The Standard Environment {#chap-stdenv}
+
+The standard build environment in the Nix Packages collection provides an environment for building Unix packages that does a lot of common build tasks automatically. In fact, for Unix packages that use the standard `./configure; make; make install` build interface, you don’t need to write a build script at all; the standard environment does everything automatically. If `stdenv` doesn’t do what you need automatically, you can easily customise or override the various build phases.
+
+## Using `stdenv` {#sec-using-stdenv}
+
+To build a package with the standard environment, you use the function `stdenv.mkDerivation`, instead of the primitive built-in function `derivation`, e.g.
+
+```nix
+stdenv.mkDerivation {
+  name = "libfoo-1.2.3";
+  src = fetchurl {
+    url = "http://example.org/libfoo-1.2.3.tar.bz2";
+    hash = "sha256-tWxU/LANbQE32my+9AXyt3nCT7NBVfJ45CX757EMT3Q=";
+  };
+}
+```
+
+(`stdenv` needs to be in scope, so if you write this in a separate Nix expression from `pkgs/all-packages.nix`, you need to pass it as a function argument.) Specifying a `name` and a `src` is the absolute minimum Nix requires. For convenience, you can also use `pname` and `version` attributes and `mkDerivation` will automatically set `name` to `"${pname}-${version}"` by default.
+**Since [RFC 0035](https://github.com/NixOS/rfcs/pull/35), this is preferred for packages in Nixpkgs**, as it allows us to reuse the version easily:
+
+```nix
+stdenv.mkDerivation rec {
+  pname = "libfoo";
+  version = "1.2.3";
+  src = fetchurl {
+    url = "http://example.org/libfoo-source-${version}.tar.bz2";
+    hash = "sha256-tWxU/LANbQE32my+9AXyt3nCT7NBVfJ45CX757EMT3Q=";
+  };
+}
+```
+
+Many packages have dependencies that are not provided in the standard environment. It’s usually sufficient to specify those dependencies in the `buildInputs` attribute:
+
+```nix
+stdenv.mkDerivation {
+  pname = "libfoo";
+  version = "1.2.3";
+  # ...
+  buildInputs = [ libbar perl ncurses ];
+}
+```
+
+This attribute ensures that the `bin` subdirectories of these packages appear in the `PATH` environment variable during the build, that their `include` subdirectories are searched by the C compiler, and so on. (See [](#ssec-setup-hooks) for details.)
+
+Often it is necessary to override or modify some aspect of the build. To make this easier, the standard environment breaks the package build into a number of *phases*, all of which can be overridden or modified individually: unpacking the sources, applying patches, configuring, building, and installing. (There are some others; see [](#sec-stdenv-phases).) For instance, a package that doesn’t supply a makefile but instead has to be compiled "manually" could be handled like this:
+
+```nix
+stdenv.mkDerivation {
+  pname = "fnord";
+  version = "4.5";
+  # ...
+  buildPhase = ''
+    gcc foo.c -o foo
+  '';
+  installPhase = ''
+    mkdir -p $out/bin
+    cp foo $out/bin
+  '';
+}
+```
+
+(Note the use of `''`-style string literals, which are very convenient for large multi-line script fragments because they don’t need escaping of `"` and `\`, and because indentation is intelligently removed.)
+
+There are many other attributes to customise the build. These are listed in [](#ssec-stdenv-attributes).
+
+While the standard environment provides a generic builder, you can still supply your own build script:
+
+```nix
+stdenv.mkDerivation {
+  pname = "libfoo";
+  version = "1.2.3";
+  # ...
+  builder = ./builder.sh;
+}
+```
+
+where the builder can do anything it wants, but typically starts with
+
+```bash
+source $stdenv/setup
+```
+
+to let `stdenv` set up the environment (e.g. by resetting `PATH` and populating it from build inputs). If you want, you can still use `stdenv`’s generic builder:
+
+```bash
+source $stdenv/setup
+
+buildPhase() {
+  echo "... this is my custom build phase ..."
+  gcc foo.c -o foo
+}
+
+installPhase() {
+  mkdir -p $out/bin
+  cp foo $out/bin
+}
+
+genericBuild
+```
+
+### Building a `stdenv` package in `nix-shell` {#sec-building-stdenv-package-in-nix-shell}
+
+To build a `stdenv` package in a [`nix-shell`](https://nixos.org/manual/nix/unstable/command-ref/nix-shell.html), enter a shell, find the [phases](#sec-stdenv-phases) you wish to build, then invoke `genericBuild` manually:
+
+Go to an empty directory, invoke `nix-shell` with the desired package, and from inside the shell, set the output variables to a writable directory:
+
+```bash
+cd "$(mktemp -d)"
+nix-shell '<nixpkgs>' -A some_package
+export out=$(pwd)/out
+```
+
+Next, invoke the desired parts of the build.
+First, run the phases that generate a working copy of the sources, which will change directory to the sources for you:
+
+```bash
+phases="${prePhases[*]:-} unpackPhase patchPhase" genericBuild
+```
+
+Then, run more phases up until the failure is reached.
+If the failure is in the build or check phase, the following phases would be required:
+
+```bash
+phases="${preConfigurePhases[*]:-} configurePhase ${preBuildPhases[*]:-} buildPhase checkPhase" genericBuild
+```
+
+Use this command to run all install phases:
+```bash
+phases="${preInstallPhases[*]:-} installPhase ${preFixupPhases[*]:-} fixupPhase installCheckPhase" genericBuild
+```
+
+A single phase can be re-run as many times as necessary to examine the failure, like so:
+
+```bash
+phases="buildPhase" genericBuild
+```
+
+To modify a [phase](#sec-stdenv-phases), first print it with
+
+```bash
+echo "$buildPhase"
+```
+
+Or, if that is empty, for instance, if it is using a function:
+
+```bash
+type buildPhase
+```
+
+then change it in a text editor, and paste it back to the terminal.
+
+::: {.note}
+This method may have some inconsistencies in environment variables and behaviour compared to a normal build within the [Nix build sandbox](https://nixos.org/manual/nix/unstable/language/derivations#builder-execution).
+The following is a non-exhaustive list of such differences:
+
+- `TMP`, `TMPDIR`, and similar variables likely point to non-empty directories whose existing files may conflict with the build.
+- Output store paths are not writable, so the variables for outputs need to be overridden to writable paths.
+- Other environment variables may be inconsistent with a `nix-build` either due to `nix-shell`'s initialization script or due to the use of `nix-shell` without the `--pure` option.
+
+If the build fails differently inside the shell than in the sandbox, consider using [`breakpointHook`](#breakpointhook) and invoking `nix-build` instead.
+The [`--keep-failed`](https://nixos.org/manual/nix/unstable/command-ref/conf-file#opt--keep-failed) option for `nix-build` may also be useful to examine the build directory of a failed build.
+:::
+
+## Tools provided by `stdenv` {#sec-tools-of-stdenv}
+
+The standard environment provides the following packages:
+
+- The GNU C Compiler, configured with C and C++ support.
+- GNU coreutils (contains a few dozen standard Unix commands).
+- GNU findutils (contains `find`).
+- GNU diffutils (contains `diff`, `cmp`).
+- GNU `sed`.
+- GNU `grep`.
+- GNU `awk`.
+- GNU `tar`.
+- `gzip`, `bzip2` and `xz`.
+- GNU Make.
+- Bash. This is the shell used for all builders in the Nix Packages collection. Not using `/bin/sh` removes a large source of portability problems.
+- The `patch` command.
+
+On Linux, `stdenv` also includes the `patchelf` utility.
+
+## Specifying dependencies {#ssec-stdenv-dependencies}
+
+Build systems often require more dependencies than just what `stdenv` provides. This section describes attributes accepted by `stdenv.mkDerivation` that can be used to make these dependencies available to the build system.
+
+### Overview {#ssec-stdenv-dependencies-overview}
+
+A full reference of the different kinds of dependencies is provided in [](#ssec-stdenv-dependencies-reference), but here is an overview of the most common ones.
+It should cover most use cases.
+
+Add dependencies to `nativeBuildInputs` if they are executed during the build:
+- those which are needed on `$PATH` during the build, for example `cmake` and `pkg-config`
+- [setup hooks](#ssec-setup-hooks), for example [`makeWrapper`](#fun-makeWrapper)
+- interpreters needed by [`patchShebangs`](#patch-shebangs.sh) for build scripts (with the `--build` flag), which can be the case for e.g. `perl`
+
+Add dependencies to `buildInputs` if they will end up copied or linked into the final output or otherwise used at runtime:
+- libraries used by compilers, for example `zlib`,
+- interpreters needed by [`patchShebangs`](#patch-shebangs.sh) for scripts which are installed, which can be the case for e.g. `perl`
+
+::: {.note}
+These criteria are independent.
+
+For example, software using Wayland usually needs the `wayland` library at runtime, so `wayland` should be added to `buildInputs`.
+But it also executes the `wayland-scanner` program as part of the build to generate code, so `wayland` should also be added to `nativeBuildInputs`.
+:::
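+
+A minimal sketch of that note, assuming the `wayland` package provides both the library and the `wayland-scanner` tool:
+
+```nix
+{
+  nativeBuildInputs = [ wayland ]; # wayland-scanner runs during the build
+  buildInputs = [ wayland ];       # libwayland is needed at runtime
+}
+```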
+
+Dependencies needed only to run tests are similarly classified between native (executed during build) and non-native (executed at runtime):
+- `nativeCheckInputs` for test tools needed on `$PATH` (such as `ctest`) and [setup hooks](#ssec-setup-hooks) (for example [`pytestCheckHook`](#python))
+- `checkInputs` for libraries linked into test executables (for example the `qcheck` OCaml package)
+
+These dependencies are only injected when [`doCheck`](#var-stdenv-doCheck) is set to `true`.
+
+#### Example {#ssec-stdenv-dependencies-overview-example}
+
+Consider for example this simplified derivation for `solo5`, a sandboxing tool:
+```nix
+stdenv.mkDerivation rec {
+  pname = "solo5";
+  version = "0.7.5";
+
+  src = fetchurl {
+    url = "https://github.com/Solo5/solo5/releases/download/v${version}/solo5-v${version}.tar.gz";
+    hash = "sha256-viwrS9lnaU8sTGuzK/+L/PlMM/xRRtgVuK5pixVeDEw=";
+  };
+
+  nativeBuildInputs = [ makeWrapper pkg-config ];
+  buildInputs = [ libseccomp ];
+
+  postInstall = ''
+    substituteInPlace $out/bin/solo5-virtio-mkimage \
+      --replace-fail "/usr/lib/syslinux" "${syslinux}/share/syslinux" \
+      --replace-fail "/usr/share/syslinux" "${syslinux}/share/syslinux" \
+      --replace-fail "cp " "cp --no-preserve=mode "
+
+    wrapProgram $out/bin/solo5-virtio-mkimage \
+      --prefix PATH : ${lib.makeBinPath [ dosfstools mtools parted syslinux ]}
+  '';
+
+  doCheck = true;
+  nativeCheckInputs = [ util-linux qemu ];
+  checkPhase = '' [elided] '';
+}
+```
+
+- `makeWrapper` is a setup hook, i.e., a shell script sourced by the generic builder of `stdenv`.
+  It is thus executed during the build and must be added to `nativeBuildInputs`.
+- `pkg-config` is a build tool which the configure script of `solo5` expects to be on `$PATH` during the build:
+  therefore, it must be added to `nativeBuildInputs`.
+- `libseccomp` is a library linked into `$out/bin/solo5-elftool`.
+  As it is used at runtime, it must be added to `buildInputs`.
+- Tests need `qemu` and `getopt` (from `util-linux`) on `$PATH`; these must be added to `nativeCheckInputs`.
+- Some dependencies are injected directly in the shell code of phases: `syslinux`, `dosfstools`, `mtools`, and `parted`.
+  In this specific case, they will end up in the output of the derivation (`$out` here).
+  As Nix marks dependencies whose absolute path is present in the output as runtime dependencies, adding them to `buildInputs` is not required.
+
+For more complex cases, like libraries linked into an executable which is then executed as part of the build system, see [](#ssec-stdenv-dependencies-reference).
+
+### Reference {#ssec-stdenv-dependencies-reference}
+
+As described in the Nix manual, almost any `*.drv` store path in a derivation’s attribute set will induce a dependency on that derivation. `mkDerivation`, however, takes a few attributes intended to include all the dependencies of a package. This is done both for structure and consistency, but also so that certain other setup can take place. For example, certain dependencies need their bin directories added to the `PATH`. That is built-in, but other setup is done via a pluggable mechanism that works in conjunction with these dependency attributes. See [](#ssec-setup-hooks) for details.
+
+Dependencies can be broken down along two axes: their host and target platforms relative to the new derivation’s. The platform distinctions are motivated by cross compilation; see [](#chap-cross) for exactly what each platform means. [^footnote-stdenv-ignored-build-platform] But even if one is not cross compiling, the platforms imply whether a dependency is needed at run-time or build-time.
+
+The extension of `PATH` with dependencies, alluded to above, proceeds according to the relative platforms alone. The process is carried out only for dependencies whose host platform matches the new derivation’s build platform i.e. dependencies which run on the platform where the new derivation will be built. [^footnote-stdenv-native-dependencies-in-path] For each dependency \<dep\> of those dependencies, `dep/bin`, if present, is added to the `PATH` environment variable.
+
+### Dependency propagation {#ssec-stdenv-dependencies-propagated}
+
+Propagated dependencies are made available to all downstream dependencies.
+This is particularly useful for interpreted languages, where all transitive dependencies have to be present in the same environment.
+Therefore it is used for the Python infrastructure in Nixpkgs.
+
+:::{.note}
+Propagated dependencies should be used with care, because they obscure the actual build inputs of dependent derivations and cause side effects through setup hooks.
+This can lead to conflicting dependencies that cannot easily be resolved.
+:::
+
+:::{.example}
+# A propagated dependency
+
+```nix
+with import <nixpkgs> {};
+let
+  bar = stdenv.mkDerivation {
+    name = "bar";
+    dontUnpack = true;
+    # `hello` is also made available to dependents, such as `foo`
+    propagatedBuildInputs = [ hello ];
+    postInstall = "mkdir $out";
+  };
+  foo = stdenv.mkDerivation {
+    name = "foo";
+    dontUnpack = true;
+    # `bar` is a direct dependency, which implicitly includes the propagated `hello`
+    buildInputs = [ bar ];
+    # The `hello` binary is available!
+    postInstall = "hello > $out";
+  };
+in
+foo
+```
+:::
+
+Dependency propagation takes cross compilation into account, meaning that dependencies that cross platform boundaries are properly adjusted.
+
+To determine the exact rules for dependency propagation, we start by assigning to each dependency a pair of ternary numbers (`-1` for `build`, `0` for `host`, and `1` for `target`) representing its [dependency type](#possible-dependency-types), which captures how its host and target platforms are each "offset" from the depending derivation’s host and target platforms. The following table summarizes the different combinations that can be obtained:
+
+| `host → target`     | attribute name      | offset   |
+| ------------------- | ------------------- | -------- |
+| `build → build`   | `depsBuildBuild`    | `-1, -1` |
+| `build → host`    | `nativeBuildInputs` | `-1, 0`  |
+| `build → target`  | `depsBuildTarget`   | `-1, 1`  |
+| `host → host`     | `depsHostHost`      | `0, 0`   |
+| `host → target`   | `buildInputs`       | `0, 1`   |
+| `target → target` | `depsTargetTarget`  | `1, 1`   |
+
+Algorithmically, we traverse propagated inputs, accumulating every propagated dependency’s propagated dependencies and adjusting them to account for the “shift in perspective” described by the current dependency’s platform offsets. The result is a sort of transitive closure of the dependency relation, with the offsets being approximately summed when two dependency links are combined. We also prune transitive dependencies whose combined offsets go out-of-bounds, which can be viewed as a filter over that transitive closure removing dependencies that are blatantly absurd.
+
+We can define the process precisely with [Natural Deduction](https://en.wikipedia.org/wiki/Natural_deduction) using the inference rules below. This probably seems a bit obtuse, but so is the bash code that actually implements it! [^footnote-stdenv-find-inputs-location] They’re confusing in very different ways, so hopefully if something doesn’t make sense in one presentation, it will in the other!
+
+```
+let mapOffset(h, t, i) = i + (if i <= 0 then h else t - 1)
+
+propagated-dep(h0, t0, A, B)
+propagated-dep(h1, t1, B, C)
+h0 + h1 in {-1, 0, 1}
+h0 + t1 in {-1, 0, 1}
+-------------------------------------- Transitive property
+propagated-dep(mapOffset(h0, t0, h1),
+               mapOffset(h0, t0, t1),
+               A, C)
+```
+
+```
+let mapOffset(h, t, i) = i + (if i <= 0 then h else t - 1)
+
+dep(h0, t0, A, B)
+propagated-dep(h1, t1, B, C)
+h0 + h1 in {-1, 0, 1}
+h0 + t1 in {-1, 0, 1}
+----------------------------- Take immediate dependencies' propagated dependencies
+propagated-dep(mapOffset(h0, t0, h1),
+               mapOffset(h0, t0, t1),
+               A, C)
+```
+
+```
+propagated-dep(h, t, A, B)
+----------------------------- Propagated dependencies count as dependencies
+dep(h, t, A, B)
+```
+
+Some explanation of this monstrosity is in order. In the common case, the target offset of a dependency is the successor to its host offset: `t = h + 1`. That means that:
+
+```
+let f(h, t, i) = i + (if i <= 0 then h else t - 1)
+let f(h, h + 1, i) = i + (if i <= 0 then h else (h + 1) - 1)
+let f(h, h + 1, i) = i + (if i <= 0 then h else h)
+let f(h, h + 1, i) = i + h
+```
+
+This is where “sum-like” comes in from above: we can just sum all of the host offsets to get the host offset of the transitive dependency. The target offset of the transitive dependency is the host offset + 1, just as it was with the dependencies composed to make this transitive one; it can be ignored as it doesn’t add any new information.
+
+Because of the bounds checks, the uncommon cases are `h = t` and `h + 2 = t`. In the former case, the motivation for `mapOffset` is that since the dependency’s host and target platforms are the same, no transitive dependency of it should be able to “discover” an offset greater than its reduced target offset. `mapOffset` effectively “squashes” all its transitive dependencies’ offsets so that none will ever be greater than the target offset of the original `h = t` package. In the other case, `h + 1` is skipped over between the host and target offsets. Instead of squashing the offsets, we need to “rip” them apart so that no transitive dependency’s offset lands on that skipped value.
+
+Overall, the unifying theme here is that propagation shouldn’t be introducing transitive dependencies involving platforms the depending package is unaware of. \[One can imagine the depending package asking for dependencies with the platforms it knows about; other platforms it doesn’t know how to ask for. The platform description in that scenario is a kind of unforgeable capability.\] The offset bounds checking and definition of `mapOffset` together ensure that this is the case. Discovering a new offset is discovering a new platform, and since those platforms weren’t in the derivation “spec” of the needing package, they cannot be relevant. From a capability perspective, we can imagine that the host and target platforms of a package are the capabilities a package requires, and the depending package must provide the capability to the dependency.
+
+#### Variables specifying dependencies {#variables-specifying-dependencies}
+
+##### `depsBuildBuild` {#var-stdenv-depsBuildBuild}
+
+A list of dependencies whose host and target platforms are the new derivation’s build platform. These are programs and libraries used at build time that produce programs and libraries also used at build time. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it in `nativeBuildInputs` instead. The most common use of this is `buildPackages.stdenv.cc`, the default C compiler for this role. That example crops up more often than one might think in old, commonly used C libraries.
+
+Since these packages are able to be run at build-time, they are always added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
+
+##### `nativeBuildInputs` {#var-stdenv-nativeBuildInputs}
+
+A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s host platform. These are programs and libraries used at build-time that, if they are a compiler or similar tool, produce code to run at run-time—i.e. tools used to build the new derivation. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild` or `depsBuildTarget`. This could be called `depsBuildHost` but `nativeBuildInputs` is used for historical continuity.
+
+Since these packages are able to be run at build-time, they are added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
+
+##### `depsBuildTarget` {#var-stdenv-depsBuildTarget}
+
+A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s target platform. These are programs used at build time that produce code to run with code produced by the depending package. Most commonly, these are tools used to build the runtime or standard library that the currently-being-built compiler will inject into any code it compiles. In many cases, the currently-being-built-compiler is itself employed for that task, but when that compiler won’t run (i.e. its build and host platform differ) this is not possible. Other times, the compiler relies on some other tool, like binutils, that is always built separately so that the dependency is unconditional.
+
+This is a somewhat confusing concept to wrap one’s head around, and for good reason. As the only dependency type where the platform offsets, `-1` and `1`, are not adjacent integers, it requires thinking of a bootstrapping stage *two* away from the current one. It and its use-case go hand in hand and are both considered poor form: try to not need this sort of dependency, and try to avoid building standard libraries and runtimes in the same derivation as the compiler produces code using them. Instead strive to build those like a normal library, using the newly-built compiler just as a normal library would. In short, do not use this attribute unless you are packaging a compiler and are sure it is needed.
+
+Since these packages are able to run at build time, they are added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
+
+##### `depsHostHost` {#var-stdenv-depsHostHost}
+
+A list of dependencies whose host and target platforms match the new derivation’s host platform. In practice, this would usually be tools used by compilers for macros or a metaprogramming system, or libraries used by the macros or metaprogramming code itself. It’s always preferable to use a `depsBuildBuild` dependency in the derivation being built over a `depsHostHost` on the tool doing the building for this purpose.
+
+##### `buildInputs` {#var-stdenv-buildInputs}
+
+A list of dependencies whose host platform and target platform match the new derivation’s. This would be called `depsHostTarget` but for historical continuity. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild`.
+
+These are often programs and libraries used by the new derivation at *run*-time, but that isn’t always the case. For example, the machine code in a statically-linked library is only used at run-time, but the derivation containing the library is only needed at build-time. Even in the dynamic case, the library may also be needed at build-time to appease the linker.
+
+##### `depsTargetTarget` {#var-stdenv-depsTargetTarget}
+
+A list of dependencies whose host platform matches the new derivation’s target platform. These are packages that run on the target platform, e.g. the standard library or run-time deps of standard library that a compiler insists on knowing about. It’s poor form in almost all cases for a package to depend on another from a future stage \[future stage corresponding to positive offset\]. Do not use this attribute unless you are packaging a compiler and are sure it is needed.
+
+##### `depsBuildBuildPropagated` {#var-stdenv-depsBuildBuildPropagated}
+
+The propagated equivalent of `depsBuildBuild`. This perhaps never ought to be used, but it is included for consistency \[see below for the others\].
+
+##### `propagatedNativeBuildInputs` {#var-stdenv-propagatedNativeBuildInputs}
+
+The propagated equivalent of `nativeBuildInputs`. This would be called `depsBuildHostPropagated` but for historical continuity. For example, if package `Y` has `propagatedNativeBuildInputs = [X]`, and package `Z` has `buildInputs = [Y]`, then package `Z` will be built as if it included package `X` in its `nativeBuildInputs`. If instead, package `Z` has `nativeBuildInputs = [Y]`, then `Z` will be built as if it included `X` in the `depsBuildBuild` of package `Z`, because of the sum of the two `-1` host offsets.
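+
+A sketch of that scenario, with hypothetical packages `X`, `Y`, and `Z`:
+
+```nix
+let
+  X = stdenv.mkDerivation { name = "X"; /* ... */ };
+  Y = stdenv.mkDerivation {
+    name = "Y";
+    propagatedNativeBuildInputs = [ X ];
+    # ...
+  };
+  Z = stdenv.mkDerivation {
+    name = "Z";
+    # X is treated as if it were listed in Z's nativeBuildInputs
+    buildInputs = [ Y ];
+  };
+in Z
+```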
+
+##### `depsBuildTargetPropagated` {#var-stdenv-depsBuildTargetPropagated}
+
+The propagated equivalent of `depsBuildTarget`. This is prefixed for the same reason of alerting potential users.
+
+##### `depsHostHostPropagated` {#var-stdenv-depsHostHostPropagated}
+
+The propagated equivalent of `depsHostHost`.
+
+##### `propagatedBuildInputs` {#var-stdenv-propagatedBuildInputs}
+
+The propagated equivalent of `buildInputs`. This would be called `depsHostTargetPropagated` but for historical continuity.
+
+##### `depsTargetTargetPropagated` {#var-stdenv-depsTargetTargetPropagated}
+
+The propagated equivalent of `depsTargetTarget`. This is prefixed for the same reason of alerting potential users.
+
+## Attributes {#ssec-stdenv-attributes}
+
+### Variables affecting `stdenv` initialisation {#variables-affecting-stdenv-initialisation}
+
+#### `NIX_DEBUG` {#var-stdenv-NIX_DEBUG}
+
+A number between 0 and 7 indicating how much information to log. If set to 1 or higher, `stdenv` will print moderate debugging information during the build. In particular, the `gcc` and `ld` wrapper scripts will print out the complete command line passed to the wrapped tools. If set to 6 or higher, the `stdenv` setup script will be run with `set -x` tracing. If set to 7 or higher, the `gcc` and `ld` wrapper scripts will also be run with `set -x` tracing.
+
+### Attributes affecting build properties {#attributes-affecting-build-properties}
+
+#### `enableParallelBuilding` {#var-stdenv-enableParallelBuilding}
+
+If set to `true`, `stdenv` will pass specific flags to `make` and other build tools to enable parallel building with up to `build-cores` workers.
+
+Unless set to `false`, some build systems with good support for parallel building, including `cmake`, `meson`, and `qmake`, will set it to `true`.
+
+### Special variables {#special-variables}
+
+#### `passthru` {#var-stdenv-passthru}
+
+This is an attribute set which can be filled with arbitrary values. For example:
+
+```nix
+{
+  passthru = {
+    foo = "bar";
+    baz = {
+      value1 = 4;
+      value2 = 5;
+    };
+  };
+}
+```
+
+Values inside it are not passed to the builder, so you can change them without triggering a rebuild. However, they can be accessed outside of a derivation directly, as if they were set inside a derivation itself, e.g. `hello.baz.value1`. We don’t specify any usage or schema of `passthru` - it is meant for values that would be useful outside the derivation in other parts of a Nix expression (e.g. in other derivations). An example would be to convey some specific dependency of your derivation which contains a program with plugin support. Later, others who write derivations for plugins can use the passed-through dependency to ensure that their plugin is binary-compatible with the built program.
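+
+For instance, continuing the example above and assuming the derivation is bound to `hello`, the values can be read without building the package:
+
+```nix
+hello.baz.value1 + hello.baz.value2 # evaluates to 9; `hello` itself is not built
+```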
+
+#### `passthru.updateScript` {#var-passthru-updateScript}
+
+A script to be run by `maintainers/scripts/update.nix` when the package is matched. The attribute can contain one of the following:
+
+- []{#var-passthru-updateScript-command} an executable file, either on the file system:
+
+  ```nix
+  {
+    passthru.updateScript = ./update.sh;
+  }
+  ```
+
+  or inside the expression itself:
+
+  ```nix
+  {
+    passthru.updateScript = writeScript "update-zoom-us" ''
+      #!/usr/bin/env nix-shell
+      #!nix-shell -i bash -p curl pcre2 common-updater-scripts
+
+      set -eu -o pipefail
+
+      version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcre2grep -o1 '/(([0-9]\.?)+)/')"
+      update-source-version zoom-us "$version"
+    '';
+  }
+  ```
+
+- a list: a script followed by arguments to be passed to it:
+
+  ```nix
+  {
+    passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
+  }
+  ```
+
+- an attribute set containing:
+  - [`command`]{#var-passthru-updateScript-set-command} – a string or list in the [format expected by `passthru.updateScript`](#var-passthru-updateScript-command).
+  - [`attrPath`]{#var-passthru-updateScript-set-attrPath} (optional) – a string containing the canonical attribute path for the package. If present, it will be passed to the update script instead of the attribute path on which the package was discovered during Nixpkgs traversal.
+  - [`supportedFeatures`]{#var-passthru-updateScript-set-supportedFeatures} (optional) – a list of the [extra features](#var-passthru-updateScript-supported-features) the script supports.
+
+  ```nix
+  {
+    passthru.updateScript = {
+      command = [ ../../update.sh pname ];
+      attrPath = pname;
+      supportedFeatures = [ /* ... */ ];
+    };
+  }
+  ```
+
+::: {.tip}
+A common pattern is to use the [`nix-update-script`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/common-updater/nix-update.nix) attribute provided in Nixpkgs, which runs [`nix-update`](https://github.com/Mic92/nix-update):
+
+```nix
+{
+  passthru.updateScript = nix-update-script { };
+}
+```
+
+For simple packages, this is often enough, and will ensure that the package is updated automatically by [`nixpkgs-update`](https://ryantm.github.io/nixpkgs-update) when a new version is released. The [update bot](https://nix-community.org/update-bot) runs periodically to attempt to automatically update packages, and will run `passthru.updateScript` if set. While not strictly necessary if the project is listed on [Repology](https://repology.org), using `nix-update-script` allows the package to update via many more sources (e.g. GitHub releases).
+:::
+
+##### How are update scripts executed? {#var-passthru-updateScript-execution}
+
+Update scripts are invoked by the `maintainers/scripts/update.nix` script. You can run `nix-shell maintainers/scripts/update.nix` in the root of the Nixpkgs repository for information on how to use it. `update.nix` offers several modes for selecting packages to update (e.g. select by attribute path, traverse Nixpkgs and filter by maintainer, etc.), and it will execute update scripts for all matched packages that have an `updateScript` attribute.
+
+Each update script will be passed the following environment variables:
+
+- [`UPDATE_NIX_NAME`]{#var-passthru-updateScript-env-UPDATE_NIX_NAME} – content of the `name` attribute of the updated package.
+- [`UPDATE_NIX_PNAME`]{#var-passthru-updateScript-env-UPDATE_NIX_PNAME} – content of the `pname` attribute of the updated package.
+- [`UPDATE_NIX_OLD_VERSION`]{#var-passthru-updateScript-env-UPDATE_NIX_OLD_VERSION} – content of the `version` attribute of the updated package.
+- [`UPDATE_NIX_ATTR_PATH`]{#var-passthru-updateScript-env-UPDATE_NIX_ATTR_PATH} – attribute path the `update.nix` discovered the package on (or the [canonical `attrPath`](#var-passthru-updateScript-set-attrPath) when available). Example: `pantheon.elementary-terminal`
+
+::: {.note}
+An update script will usually be run from the root of the Nixpkgs repository, but you should not rely on that. Also note that `update.nix` executes update scripts in parallel by default, so you should avoid running `git commit` or any other commands that cannot handle that.
+:::
+
+::: {.tip}
+While update scripts should not create commits themselves, `maintainers/scripts/update.nix` supports automatically creating commits when running it with `--argstr commit true`. If you need to customize the commit message, you can have the update script implement the [`commit`](#var-passthru-updateScript-commit) feature.
+:::
+
+##### Supported features {#var-passthru-updateScript-supported-features}
+###### `commit` {#var-passthru-updateScript-commit}
+
+This feature allows update scripts to *ask* `update.nix` to create Git commits.
+
+When support for this feature is declared, whenever the update script exits with a `0` return status, it is expected to print to standard output a JSON list containing one object (described below) for each updated attribute.
+
+When `update.nix` is run with the `--argstr commit true` argument, it will create a separate commit for each of the objects. An empty list can be returned when the script did not update any files, for example, when the package is already at the latest version.
+
+The commit object contains the following values:
+
+- [`attrPath`]{#var-passthru-updateScript-commit-attrPath} – a string containing the attribute path.
+- [`oldVersion`]{#var-passthru-updateScript-commit-oldVersion} – a string containing the old version.
+- [`newVersion`]{#var-passthru-updateScript-commit-newVersion} – a string containing the new version.
+- [`files`]{#var-passthru-updateScript-commit-files} – a non-empty list of file paths (as strings) to add to the commit.
+- [`commitBody`]{#var-passthru-updateScript-commit-commitBody} (optional) – a string with extra content to be appended to the default commit message (useful for adding changelog links).
+- [`commitMessage`]{#var-passthru-updateScript-commit-commitMessage} (optional) – a string to use instead of the default commit message.
+
+If the returned array contains exactly one object (e.g. `[{}]`), all values are optional and will be determined automatically.
+
+::: {.example #var-passthru-updateScript-example-commit}
+# Standard output of an update script using commit feature
+
+```json
+[
+  {
+    "attrPath": "volume_key",
+    "oldVersion": "0.3.11",
+    "newVersion": "0.3.12",
+    "files": [
+      "/path/to/nixpkgs/pkgs/development/libraries/volume-key/default.nix"
+    ]
+  }
+]
+```
+:::
+
+### Fixed-point arguments of `mkDerivation` {#mkderivation-recursive-attributes}
+
+If you pass a function to `mkDerivation`, it will receive as its argument the final arguments, including the overrides when reinvoked via `overrideAttrs`. For example:
+
+```nix
+mkDerivation (finalAttrs: {
+  pname = "hello";
+  withFeature = true;
+  configureFlags =
+    lib.optionals finalAttrs.withFeature ["--with-feature"];
+})
+```
+
+Note that this does not use the `rec` keyword to reuse `withFeature` in `configureFlags`.
+The `rec` keyword works at the syntax level and is unaware of overriding.
+
+Instead, the definition references `finalAttrs`, allowing users to change `withFeature`
+consistently with `overrideAttrs`.
+
+`finalAttrs` also contains the attribute `finalPackage`, which includes the output paths, etc.
+
+Let's look at a more elaborate example to understand the differences between
+various bindings:
+
+```nix
+# `pkg` is the _original_ definition (for illustration purposes)
+let pkg =
+  mkDerivation (finalAttrs: {
+    # ...
+
+    # An example attribute
+    packages = [];
+
+    # `passthru.tests` is a commonly defined attribute.
+    passthru.tests.simple = f finalAttrs.finalPackage;
+
+    # An example of an attribute containing a function
+    passthru.appendPackages = packages':
+      finalAttrs.finalPackage.overrideAttrs (newSelf: super: {
+        packages = super.packages ++ packages';
+      });
+
+    # For illustration purposes; referenced as
+    # `(pkg.overrideAttrs(x)).finalAttrs` etc in the text below.
+    passthru.finalAttrs = finalAttrs;
+    passthru.original = pkg;
+  });
+in pkg
+```
+
+Unlike the `pkg` binding in the above example, the `finalAttrs` parameter always references the final attributes. For instance `(pkg.overrideAttrs(x)).finalAttrs.finalPackage` is identical to `pkg.overrideAttrs(x)`, whereas `(pkg.overrideAttrs(x)).original` is the same as the original `pkg`.
+
+See also the section about [`passthru.tests`](#var-meta-tests).
+
+## Phases {#sec-stdenv-phases}
+
+`stdenv.mkDerivation` sets the Nix [derivation](https://nixos.org/manual/nix/stable/expressions/derivations.html#derivations)'s builder to a script that loads the stdenv `setup.sh` bash library and calls `genericBuild`. Most packaging functions rely on this default builder.
+
+This generic command either invokes a script at *buildCommandPath*, or a *buildCommand*, or a number of *phases*. Package builds are split into phases to make it easier to override specific parts of the build (e.g., unpacking the sources or installing the binaries).
+
+Each phase can be overridden in its entirety either by setting the environment variable `namePhase` to a string containing some shell commands to be executed, or by redefining the shell function `namePhase`. The former is convenient to override a phase from the derivation, while the latter is convenient from a build script. However, typically one only wants to *add* some commands to a phase, e.g. by defining `postInstall` or `preFixup`, as skipping some of the default actions may have unexpected consequences. The default script for each phase is defined in the file `pkgs/stdenv/generic/setup.sh`.
+
+When overriding a phase, for example `installPhase`, it is important to start it with `runHook preInstall` and end it with `runHook postInstall`, otherwise `preInstall` and `postInstall` will not be run. Even if you don’t use them directly, it is good practice to do so anyway for downstream users who may want to add a `postInstall` by overriding your derivation.
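+
+For example, an overridden `installPhase` that keeps the hooks working:
+
+```nix
+{
+  installPhase = ''
+    runHook preInstall
+
+    mkdir -p $out/bin
+    cp foo $out/bin
+
+    runHook postInstall
+  '';
+}
+```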
+
+While inside an interactive `nix-shell`, if you want to run all phases in the order they would be run in an actual build, you can invoke `genericBuild` yourself.
+
+### Controlling phases {#ssec-controlling-phases}
+
+There are a number of variables that control what phases are executed and in what order:
+
+#### Variables affecting phase control {#variables-affecting-phase-control}
+
+##### `phases` {#var-stdenv-phases}
+
+Specifies the phases. You can change the order in which phases are executed, or add new phases, by setting this variable. If it’s not set, the default value is used, which is `$prePhases unpackPhase patchPhase $preConfigurePhases configurePhase $preBuildPhases buildPhase checkPhase $preInstallPhases installPhase fixupPhase installCheckPhase $preDistPhases distPhase $postPhases`.
+
+It is discouraged to set this variable, as it is easy to miss some important functionality hidden in some of the less obviously needed phases (like `fixupPhase` which patches the shebang of scripts).
+Usually, if you just want to add a few phases, it’s more convenient to set one of the variables below (such as `preInstallPhases`).
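+
+For example, a sketch of adding a custom phase with `preInstallPhases` instead of redefining `phases` (the phase name and its contents are hypothetical):
+
+```nix
+{
+  preInstallPhases = [ "generateDocsPhase" ];
+  generateDocsPhase = ''
+    # extra step run just before installPhase
+    make docs
+  '';
+}
+```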
+
+##### `prePhases` {#var-stdenv-prePhases}
+
+Additional phases executed before any of the default phases.
+
+##### `preConfigurePhases` {#var-stdenv-preConfigurePhases}
+
+Additional phases executed just before the configure phase.
+
+##### `preBuildPhases` {#var-stdenv-preBuildPhases}
+
+Additional phases executed just before the build phase.
+
+##### `preInstallPhases` {#var-stdenv-preInstallPhases}
+
+Additional phases executed just before the install phase.
+
+##### `preFixupPhases` {#var-stdenv-preFixupPhases}
+
+Additional phases executed just before the fixup phase.
+
+##### `preDistPhases` {#var-stdenv-preDistPhases}
+
+Additional phases executed just before the distribution phase.
+
+##### `postPhases` {#var-stdenv-postPhases}
+
+Additional phases executed after any of the default phases.
+
+### The unpack phase {#ssec-unpack-phase}
+
+The unpack phase is responsible for unpacking the source code of the package. The default implementation of `unpackPhase` unpacks the source files listed in the `src` environment variable to the current directory. It supports the following files by default:
+
+#### Tar files {#tar-files}
+
+These can optionally be compressed using `gzip` (`.tar.gz`, `.tgz` or `.tar.Z`), `bzip2` (`.tar.bz2`, `.tbz2` or `.tbz`) or `xz` (`.tar.xz`, `.tar.lzma` or `.txz`).
+
+#### Zip files {#zip-files}
+
+Zip files are unpacked using `unzip`. However, `unzip` is not in the standard environment, so you should add it to `nativeBuildInputs` yourself.
+
+#### Directories in the Nix store {#directories-in-the-nix-store}
+
+These are copied to the current directory. The hash part of the file name is stripped, e.g. `/nix/store/1wydxgby13cz...-my-sources` would be copied to `my-sources`.
+
+Additional file types can be supported by setting the `unpackCmd` variable (see below).
+
+#### Variables controlling the unpack phase {#variables-controlling-the-unpack-phase}
+
+##### `srcs` / `src` {#var-stdenv-src}
+
+The list of source files or directories to be unpacked or copied. One of these must be set. Note that if you use `srcs`, you should also set `sourceRoot` or `setSourceRoot`.
+
+##### `sourceRoot` {#var-stdenv-sourceRoot}
+
+After unpacking all of `src` and `srcs`, if neither `sourceRoot` nor `setSourceRoot` is set, `unpackPhase` of the generic builder checks that the unpacking produced a single directory and moves the current working directory into it.
+
+If `unpackPhase` produces multiple source directories, you should set `sourceRoot` to the name of the intended directory.
+You can also set `sourceRoot = ".";` if you want to control it yourself in a later phase.
+
+For example, if you want your build to start in a sub-directory inside your sources, and you are using a `fetchzip`-derived `src` (like `fetchFromGitHub` or similar), you need to set `sourceRoot = "${src.name}/my-sub-directory"`.
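+
+A sketch of that situation (owner, repository, and sub-directory are hypothetical):
+
+```nix
+stdenv.mkDerivation rec {
+  pname = "frontend";
+  version = "1.0";
+  src = fetchFromGitHub {
+    # ...
+  };
+  # fetchFromGitHub names its result "source" by default, so ${src.name} is "source"
+  sourceRoot = "${src.name}/frontend";
+}
+```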
+
+##### `setSourceRoot` {#var-stdenv-setSourceRoot}
+
+Alternatively to setting `sourceRoot`, you can set `setSourceRoot` to a shell command to be evaluated by the unpack phase after the sources have been unpacked. This command must set `sourceRoot`.
+
+For example, if you are using `fetchurl` on an archive file that gets unpacked into a single directory the name of which changes between package versions, and you want your build to start in its sub-directory, you need to set `setSourceRoot = "sourceRoot=$(echo */my-sub-directory)";`, or in the case of multiple sources, you could use something more specific, like `setSourceRoot = "sourceRoot=$(echo ${pname}-*/my-sub-directory)";`.
+
+##### `preUnpack` {#var-stdenv-preUnpack}
+
+Hook executed at the start of the unpack phase.
+
+##### `postUnpack` {#var-stdenv-postUnpack}
+
+Hook executed at the end of the unpack phase.
+
+##### `dontUnpack` {#var-stdenv-dontUnpack}
+
+Set to true to skip the unpack phase.
+
+##### `dontMakeSourcesWritable` {#var-stdenv-dontMakeSourcesWritable}
+
+If set to `1`, the unpacked sources are *not* made writable. By default, they are made writable to prevent problems with read-only sources. For example, copied store directories would be read-only without this.
+
+##### `unpackCmd` {#var-stdenv-unpackCmd}
+
+The unpack phase evaluates the string `$unpackCmd` for any unrecognised file. The path to the current source file is contained in the `curSrc` variable.
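+
+For example, a sketch that teaches the unpack phase to handle `.deb` archives (assuming `dpkg` is available in `nativeBuildInputs`):
+
+```nix
+{
+  unpackCmd = ''
+    # extract the Debian archive referenced by $curSrc into a fresh directory
+    dpkg-deb -x "$curSrc" unpacked
+  '';
+}
+```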
+
+### The patch phase {#ssec-patch-phase}
+
+The patch phase applies the list of patches defined in the `patches` variable.
+
+#### Variables controlling the patch phase {#variables-controlling-the-patch-phase}
+
+##### `dontPatch` {#var-stdenv-dontPatch}
+
+Set to true to skip the patch phase.
+
+##### `patches` {#var-stdenv-patches}
+
+The list of patches. They must be in the format accepted by the `patch` command, and may optionally be compressed using `gzip` (`.gz`), `bzip2` (`.bz2`) or `xz` (`.xz`).
+
+##### `patchFlags` {#var-stdenv-patchFlags}
+
+Flags to be passed to `patch`. If not set, the argument `-p1` is used, which causes the leading directory component to be stripped from the file names in each patch.
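+
+For example, to apply patches without stripping the leading path component:
+
+```nix
+{
+  patchFlags = [ "-p0" ];
+}
+```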
+
+##### `prePatch` {#var-stdenv-prePatch}
+
+Hook executed at the start of the patch phase.
+
+##### `postPatch` {#var-stdenv-postPatch}
+
+Hook executed at the end of the patch phase.
+
+### The configure phase {#ssec-configure-phase}
+
+The configure phase prepares the source tree for building. The default `configurePhase` runs `./configure` (typically an Autoconf-generated script) if it exists.
+
+#### Variables controlling the configure phase {#variables-controlling-the-configure-phase}
+
+##### `configureScript` {#var-stdenv-configureScript}
+
+The name of the configure script. It defaults to `./configure` if it exists; otherwise, the configure phase is skipped. This can actually be a command (like `perl ./Configure.pl`).
+
+##### `configureFlags` {#var-stdenv-configureFlags}
+
+A list of strings passed as additional arguments to the configure script.
+
+##### `dontConfigure` {#var-stdenv-dontConfigure}
+
+Set to true to skip the configure phase.
+
+##### `configureFlagsArray` {#var-stdenv-configureFlagsArray}
+
+A shell array containing additional arguments passed to the configure script. You must use this instead of `configureFlags` if the arguments contain spaces.
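+
+Since shell arrays cannot be passed through environment variables, the array has to be set from shell code, for example in `preConfigure` (a sketch):
+
+```nix
+{
+  preConfigure = ''
+    configureFlagsArray+=("CFLAGS=-O2 -g")
+  '';
+}
+```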
+
+##### `dontAddPrefix` {#var-stdenv-dontAddPrefix}
+
+By default, the flag `--prefix=$prefix` is added to the configure flags. If this is undesirable, set this variable to true.
+
+##### `prefix` {#var-stdenv-prefix}
+
+The prefix under which the package must be installed, passed via the `--prefix` option to the configure script. It defaults to `$out`.
+
+##### `prefixKey` {#var-stdenv-prefixKey}
+
+The key to use when specifying the prefix. By default, this is set to `--prefix=` as that is used by the majority of packages.
+
+##### `dontAddStaticConfigureFlags` {#var-stdenv-dontAddStaticConfigureFlags}
+
+By default, when building statically, stdenv will try to add build system appropriate configure flags to try to enable static builds.
+
+If this is undesirable, set this variable to true.
+
+##### `dontAddDisableDepTrack` {#var-stdenv-dontAddDisableDepTrack}
+
+By default, the flag `--disable-dependency-tracking` is added to the configure flags to speed up Automake-based builds. If this is undesirable, set this variable to true.
+
+##### `dontFixLibtool` {#var-stdenv-dontFixLibtool}
+
+By default, the configure phase applies some special hackery to all files called `ltmain.sh` before running the configure script in order to improve the purity of Libtool-based packages [^footnote-stdenv-sys-lib-search-path] . If this is undesirable, set this variable to true.
+
+##### `dontDisableStatic` {#var-stdenv-dontDisableStatic}
+
+By default, when the configure script has `--enable-static`, the option `--disable-static` is added to the configure flags.
+
+If this is undesirable, set this variable to true.  It is automatically set to true when building statically, for example through `pkgsStatic`.
+
+##### `configurePlatforms` {#var-stdenv-configurePlatforms}
+
+By default, when cross compiling, the configure script has `--build=...` and `--host=...` passed. Packages can instead pass `[ "build" "host" "target" ]` or a subset to control exactly which platform flags are passed. Compilers and other tools can use this to also pass the target platform. [^footnote-stdenv-build-time-guessing-impurity]
+
+##### `preConfigure` {#var-stdenv-preConfigure}
+
+Hook executed at the start of the configure phase.
+
+##### `postConfigure` {#var-stdenv-postConfigure}
+
+Hook executed at the end of the configure phase.
+
+### The build phase {#build-phase}
+
+The build phase is responsible for actually building the package (e.g. compiling it). The default `buildPhase` calls `make` if a file named `Makefile`, `makefile` or `GNUmakefile` exists in the current directory (or the `makefile` is explicitly set); otherwise it does nothing.
+
+#### Variables controlling the build phase {#variables-controlling-the-build-phase}
+
+##### `dontBuild` {#var-stdenv-dontBuild}
+
+Set to true to skip the build phase.
+
+##### `makefile` {#var-stdenv-makefile}
+
+The file name of the Makefile.
+
+##### `makeFlags` {#var-stdenv-makeFlags}
+
+A list of strings passed as additional flags to `make`. These flags are also used by the default install and check phase. For setting make flags specific to the build phase, use `buildFlags` (see below).
+
+```nix
+{
+  makeFlags = [ "PREFIX=$(out)" ];
+}
+```
+
+::: {.note}
+The flags are quoted in bash, but environment variables can be specified by using the make syntax.
+:::
+
+##### `makeFlagsArray` {#var-stdenv-makeFlagsArray}
+
+A shell array containing additional arguments passed to `make`. You must use this instead of `makeFlags` if the arguments contain spaces, e.g.
+
+```nix
+{
+  preBuild = ''
+    makeFlagsArray+=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
+  '';
+}
+```
+
+Note that shell arrays cannot be passed through environment variables, so you cannot set `makeFlagsArray` in a derivation attribute (because those are passed through environment variables): you have to define them in shell code.
+
+##### `buildFlags` / `buildFlagsArray` {#var-stdenv-buildFlags}
+
+A list of strings passed as additional flags to `make`. Like `makeFlags` and `makeFlagsArray`, but only used by the build phase. Any build targets should be specified as part of the `buildFlags`.
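+
+For example, to build only specific targets (the target names are hypothetical and depend on the package’s Makefile):
+
+```nix
+{
+  buildFlags = [ "lib" "tools" ];
+}
+```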
+
+##### `preBuild` {#var-stdenv-preBuild}
+
+Hook executed at the start of the build phase.
+
+##### `postBuild` {#var-stdenv-postBuild}
+
+Hook executed at the end of the build phase.
+
+You can set flags for `make` through the `makeFlags` variable.
+
+Before and after running `make`, the hooks `preBuild` and `postBuild` are called, respectively.
+
+### The check phase {#ssec-check-phase}
+
+The check phase checks whether the package was built correctly by running its test suite. The default `checkPhase` calls `make $checkTarget`, but only if the [`doCheck` variable](#var-stdenv-doCheck) is enabled.
+
+#### Variables controlling the check phase {#variables-controlling-the-check-phase}
+
+##### `doCheck` {#var-stdenv-doCheck}
+
+Controls whether the check phase is executed. By default it is skipped, but if `doCheck` is set to true, the check phase is usually executed. Thus you should set
+
+```nix
+{
+  doCheck = true;
+}
+```
+
+in the derivation to enable checks. The exception is cross compilation. Cross compiled builds never run tests, no matter how `doCheck` is set, as the newly-built program won’t run on the platform used to build it.
+
+##### `makeFlags` / `makeFlagsArray` / `makefile` {#makeflags-makeflagsarray-makefile}
+
+See the [build phase](#var-stdenv-makeFlags) for details.
+
+##### `checkTarget` {#var-stdenv-checkTarget}
+
+The `make` target that runs the tests.
+If unset, use `check` if it exists, otherwise `test`; if neither is found, do nothing.
+
+##### `checkFlags` / `checkFlagsArray` {#var-stdenv-checkFlags}
+
+A list of strings passed as additional flags to `make`. Like `makeFlags` and `makeFlagsArray`, but only used by the check phase. Unlike with `buildFlags`, the `checkTarget` is automatically added to the `make` invocation in addition to any `checkFlags` specified.
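+
+A combined sketch, assuming a hypothetical Makefile that provides a `check-unit` target and understands a `VERBOSE` variable:
+
+```nix
+{
+  doCheck = true;
+  checkTarget = "check-unit";
+  checkFlags = [ "VERBOSE=1" ];
+}
+```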
+
+##### `checkInputs` {#var-stdenv-checkInputs}
+
+A list of host dependencies used by the phase, usually libraries linked into executables built during tests. This gets included in `buildInputs` when `doCheck` is set.
+
+##### `nativeCheckInputs` {#var-stdenv-nativeCheckInputs}
+
+A list of native dependencies used by the phase, notably tools needed on `$PATH`. This gets included in `nativeBuildInputs` when `doCheck` is set.
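+
+For example (the dependencies below are placeholders for whatever the package’s test suite actually needs, and must be in scope in the package expression):
+
+```nix
+{
+  doCheck = true;
+  nativeCheckInputs = [ curl ]; # hypothetical tool invoked by the tests
+  checkInputs = [ zlib ];       # hypothetical library linked by test binaries
+}
+```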
+
+##### `preCheck` {#var-stdenv-preCheck}
+
+Hook executed at the start of the check phase.
+
+##### `postCheck` {#var-stdenv-postCheck}
+
+Hook executed at the end of the check phase.
+
+### The install phase {#ssec-install-phase}
+
+The install phase is responsible for installing the package in the Nix store under `out`. The default `installPhase` creates the directory `$out` and calls `make install`.
+
+#### Variables controlling the install phase {#variables-controlling-the-install-phase}
+
+##### `dontInstall` {#var-stdenv-dontInstall}
+
+Set to true to skip the install phase.
+
+##### `makeFlags` / `makeFlagsArray` / `makefile` {#makeflags-makeflagsarray-makefile-1}
+
+See the [build phase](#var-stdenv-makeFlags) for details.
+
+##### `installTargets` {#var-stdenv-installTargets}
+
+The make targets that perform the installation. Defaults to `install`. Example:
+
+```nix
+{
+  installTargets = "install-bin install-doc";
+}
+```
+
+##### `installFlags` / `installFlagsArray` {#var-stdenv-installFlags}
+
+A list of strings passed as additional flags to `make`. Like `makeFlags` and `makeFlagsArray`, but only used by the install phase. Unlike with `buildFlags`, the `installTargets` are automatically added to the `make` invocation in addition to any `installFlags` specified.
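+
+For example, to pass the installation prefix to a Makefile that has no configure script (mirroring the `makeFlags` example above):
+
+```nix
+{
+  installFlags = [ "PREFIX=$(out)" ];
+}
+```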
+
+##### `preInstall` {#var-stdenv-preInstall}
+
+Hook executed at the start of the install phase.
+
+##### `postInstall` {#var-stdenv-postInstall}
+
+Hook executed at the end of the install phase.
+
+### The fixup phase {#ssec-fixup-phase}
+
+The fixup phase performs (Nix-specific) post-processing actions on the files installed under `$out` by the install phase. The default `fixupPhase` does the following:
+
+- It moves the `man/`, `doc/` and `info/` subdirectories of `$out` to `share/`.
+- It strips libraries and executables of debug information.
+- On Linux, it applies the `patchelf` command to ELF executables and libraries to remove unused directories from the `RPATH` in order to prevent unnecessary runtime dependencies.
+- It rewrites the interpreter paths of shell scripts to paths found in `PATH`. E.g., `/usr/bin/perl` will be rewritten to `/nix/store/some-perl/bin/perl` found in `PATH`. See [](#patch-shebangs.sh) for details.
+
+#### Variables controlling the fixup phase {#variables-controlling-the-fixup-phase}
+
+##### `dontFixup` {#var-stdenv-dontFixup}
+
+Set to true to skip the fixup phase.
+
+##### `dontStrip` {#var-stdenv-dontStrip}
+
+If set, libraries and executables are not stripped. By default, they are.
+
+##### `dontStripHost` {#var-stdenv-dontStripHost}
+
+Like `dontStrip`, but only affects the `strip` command targeting the package’s host platform. Useful when supporting cross compilation, but otherwise feel free to ignore.
+
+##### `dontStripTarget` {#var-stdenv-dontStripTarget}
+
+Like `dontStrip`, but only affects the `strip` command targeting the packages’ target platform. Useful when supporting cross compilation, but otherwise feel free to ignore.
+
+##### `dontMoveSbin` {#var-stdenv-dontMoveSbin}
+
+If set, files in `$out/sbin` are not moved to `$out/bin`. By default, they are.
+
+##### `stripAllList` {#var-stdenv-stripAllList}
+
+List of directories to search for libraries and executables from which *all* symbols should be stripped. By default, it’s empty. Stripping all symbols is risky, since it may remove not just debug symbols but also ELF information necessary for normal execution.
+
+##### `stripAllListTarget` {#var-stdenv-stripAllListTarget}
+
+Like `stripAllList`, but only applies to packages’ target platform. By default, it’s empty. Useful when supporting cross compilation.
+
+##### `stripAllFlags` {#var-stdenv-stripAllFlags}
+
+Flags passed to the `strip` command applied to the files in the directories listed in `stripAllList`. Defaults to `-s` (i.e. `--strip-all`).
+
+##### `stripDebugList` {#var-stdenv-stripDebugList}
+
+List of directories to search for libraries and executables from which only debugging-related symbols should be stripped. It defaults to `lib lib32 lib64 libexec bin sbin`.
+
+##### `stripDebugListTarget` {#var-stdenv-stripDebugListTarget}
+
+Like `stripDebugList`, but only applies to packages’ target platform. By default, it’s empty. Useful when supporting cross compilation.
+
+##### `stripDebugFlags` {#var-stdenv-stripDebugFlags}
+
+Flags passed to the `strip` command applied to the files in the directories listed in `stripDebugList`. Defaults to `-S` (i.e. `--strip-debug`).
+
+##### `stripExclude` {#var-stdenv-stripExclude}
+
+A list of filenames or path patterns to avoid stripping. A file is excluded if its name _or_ path (from the derivation root) matches.
+
+This example prevents all `*.rlib` files from being stripped:
+
+```nix
+stdenv.mkDerivation {
+  # ...
+  stripExclude = [ "*.rlib" ];
+}
+```
+
+This example prevents files within certain paths from being stripped:
+
+```nix
+stdenv.mkDerivation {
+  # ...
+  stripExclude = [ "lib/modules/*/build/*" ];
+}
+```
+
+##### `dontPatchELF` {#var-stdenv-dontPatchELF}
+
+If set, the `patchelf` command is not used to remove unnecessary `RPATH` entries. Only applies to Linux.
+
+##### `dontPatchShebangs` {#var-stdenv-dontPatchShebangs}
+
+If set, scripts starting with `#!` do not have their interpreter paths rewritten to paths in the Nix store. See [](#patch-shebangs.sh) on how patching shebangs works.
+
+##### `dontPruneLibtoolFiles` {#var-stdenv-dontPruneLibtoolFiles}
+
+If set, libtool `.la` files associated with shared libraries won’t have their `dependency_libs` field cleared.
+
+##### `forceShare` {#var-stdenv-forceShare}
+
+The list of directories that must be moved from `$out` to `$out/share`. Defaults to `man doc info`.
+
+##### `setupHook` {#var-stdenv-setupHook}
+
+A package can export a [setup hook](#ssec-setup-hooks) by setting this variable. The setup hook, if defined, is copied to `$out/nix-support/setup-hook`. Environment variables are then substituted in it using `substituteAll`.
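+
+A minimal sketch, assuming a `setup-hook.sh` file next to the package expression:
+
+```nix
+{
+  setupHook = ./setup-hook.sh;
+}
+```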
+
+##### `preFixup` {#var-stdenv-preFixup}
+
+Hook executed at the start of the fixup phase.
+
+##### `postFixup` {#var-stdenv-postFixup}
+
+Hook executed at the end of the fixup phase.
+
+##### `separateDebugInfo` {#stdenv-separateDebugInfo}
+
+If set to `true`, the standard environment will enable debug information in C/C++ builds. After installation, the debug information will be separated from the executables and stored in the output named `debug`. (This output is enabled automatically; you don’t need to set the `outputs` attribute explicitly.) To be precise, the debug information is stored in `debug/lib/debug/.build-id/XX/YYYY…`, where \<XXYYYY…\> is the build ID of the binary (a SHA-1 hash of the contents of the binary). Debuggers like GDB use the build ID to look up the separated debug information.
+
+:::{.example #ex-gdb-debug-symbols-socat}
+
+# Enable debug symbols for use with GDB
+
+To make GDB find debug information for the `socat` package and its dependencies, you can use the following `shell.nix`:
+
+```nix
+let
+  pkgs = import ./. {
+    config = {};
+    overlays = [
+      (final: prev: {
+        ncurses = prev.ncurses.overrideAttrs { separateDebugInfo = true; };
+        readline = prev.readline.overrideAttrs { separateDebugInfo = true; };
+      })
+    ];
+  };
+
+  myDebugInfoDirs = pkgs.symlinkJoin {
+    name = "myDebugInfoDirs";
+    paths = with pkgs; [
+      glibc.debug
+      ncurses.debug
+      openssl.debug
+      readline.debug
+    ];
+  };
+in
+  pkgs.mkShell {
+
+    NIX_DEBUG_INFO_DIRS = "${pkgs.lib.getLib myDebugInfoDirs}/lib/debug";
+
+    packages = [
+      pkgs.gdb
+      pkgs.socat
+    ];
+
+    shellHook = ''
+      ${pkgs.lib.getBin pkgs.gdb}/bin/gdb ${pkgs.lib.getBin pkgs.socat}/bin/socat
+    '';
+  }
+```
+
+This setup works as follows:
+- Add [`overlays`](#chap-overlays) to the package set, since debug symbols are disabled for `ncurses` and `readline` by default.
+- Create a derivation to combine all required debug symbols under one path with [`symlinkJoin`](#trivial-builder-symlinkJoin).
+- Set the environment variable `NIX_DEBUG_INFO_DIRS` in the shell. Nixpkgs patches `gdb` to use it for looking up debug symbols.
+- Run `gdb` on the `socat` binary on shell startup in the [`shellHook`](#sec-pkgs-mkShell). Here we use [`lib.getBin`](#function-library-lib.attrsets.getBin) to ensure that the correct derivation output is selected rather than the default one.
+
+:::
+
+### The installCheck phase {#ssec-installCheck-phase}
+
+The installCheck phase checks whether the package was installed correctly by running its test suite against the installed directories. The default `installCheckPhase` calls `make installcheck`.
+
+It is often better to add tests that are not part of the source distribution to `passthru.tests` (see
+[](#var-meta-tests)). This avoids adding overhead to every build and enables us to run them independently.
+
+#### Variables controlling the installCheck phase {#variables-controlling-the-installcheck-phase}
+
+##### `doInstallCheck` {#var-stdenv-doInstallCheck}
+
+Controls whether the installCheck phase is executed. By default it is skipped, but if `doInstallCheck` is set to true, the installCheck phase is usually executed. Thus you should set
+
+```nix
+{
+  doInstallCheck = true;
+}
+```
+
+in the derivation to enable install checks. The exception is cross compilation. Cross compiled builds never run tests, no matter how `doInstallCheck` is set, as the newly-built program won’t run on the platform used to build it.
+
+##### `installCheckTarget` {#var-stdenv-installCheckTarget}
+
+The make target that runs the install tests. Defaults to `installcheck`.
+
+##### `installCheckFlags` / `installCheckFlagsArray` {#var-stdenv-installCheckFlags}
+
+A list of strings passed as additional flags to `make`. Like `makeFlags` and `makeFlagsArray`, but only used by the installCheck phase.
+
+##### `installCheckInputs` {#var-stdenv-installCheckInputs}
+
+A list of host dependencies used by the phase, usually libraries linked into executables built during tests. This gets included in `buildInputs` when `doInstallCheck` is set.
+
+##### `nativeInstallCheckInputs` {#var-stdenv-nativeInstallCheckInputs}
+
+A list of native dependencies used by the phase, notably tools needed on `$PATH`. This gets included in `nativeBuildInputs` when `doInstallCheck` is set.
+
+##### `preInstallCheck` {#var-stdenv-preInstallCheck}
+
+Hook executed at the start of the installCheck phase.
+
+##### `postInstallCheck` {#var-stdenv-postInstallCheck}
+
+Hook executed at the end of the installCheck phase.
+
+### The distribution phase {#ssec-distribution-phase}
+
+The distribution phase is intended to produce a source distribution of the package. The default `distPhase` first calls `make dist`, then it copies the resulting source tarballs to `$out/tarballs/`. This phase is only executed if the attribute `doDist` is set.
+
+#### Variables controlling the distribution phase {#variables-controlling-the-distribution-phase}
+
+##### `doDist` {#var-stdenv-doDist}
+
+If set, the distribution phase is executed.
+
+##### `distTarget` {#var-stdenv-distTarget}
+
+The make target that produces the distribution. Defaults to `dist`.
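+
+A sketch of a package opting into the distribution phase (the `dist-bzip2` target is hypothetical and must be provided by the package’s Makefile):
+
+```nix
+{
+  doDist = true;
+  distTarget = "dist-bzip2";
+}
+```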
+
+##### `distFlags` / `distFlagsArray` {#var-stdenv-distFlags}
+
+Additional flags passed to `make`.
+
+##### `tarballs` {#var-stdenv-tarballs}
+
+The names of the source distribution files to be copied to `$out/tarballs/`. It can contain shell wildcards. The default is `*.tar.gz`.
+
+##### `dontCopyDist` {#var-stdenv-dontCopyDist}
+
+If set, no files are copied to `$out/tarballs/`.
+
+##### `preDist` {#var-stdenv-preDist}
+
+Hook executed at the start of the distribution phase.
+
+##### `postDist` {#var-stdenv-postDist}
+
+Hook executed at the end of the distribution phase.
+
+## Shell functions and utilities {#ssec-stdenv-functions}
+
+The standard environment provides a number of useful functions.
+
+### `makeWrapper` \<executable\> \<wrapperfile\> \<args\> {#fun-makeWrapper}
+
+Constructs a wrapper for a program with various possible arguments. It is defined as part of two setup hooks, named `makeWrapper` and `makeBinaryWrapper`, that implement the same bash functions. Hence, to use it, you have to add `makeWrapper` to your `nativeBuildInputs`. Here's an example usage:
+
+```bash
+# adds `FOOBAR=baz` to `$out/bin/foo`’s environment
+makeWrapper $out/bin/foo $wrapperfile --set FOOBAR baz
+
+# Prefixes the binary paths of `hello` and `git`
+# and suffixes the binary path of `xdg-utils`.
+# Be advised that paths often should be patched in directly
+# (via string replacements or in `configurePhase`).
+makeWrapper $out/bin/foo $wrapperfile \
+  --prefix PATH : ${lib.makeBinPath [ hello git ]} \
+  --suffix PATH : ${lib.makeBinPath [ xdg-utils ]}
+```
+
+Packages may expect or require other utilities to be available at runtime.
+`makeWrapper` can be used to add packages to a `PATH` environment variable local to a wrapper.
+
+Use `--prefix` to explicitly set dependencies in `PATH`.
+
+::: {.note}
+`--prefix` essentially hard-codes dependencies into the wrapper.
+They cannot be overridden without rebuilding the package.
+:::
+
+If dependencies should be resolved at runtime, use `--suffix` to append fallback values to `PATH`.
+
+There are many more kinds of arguments; they are documented in `nixpkgs/pkgs/build-support/setup-hooks/make-wrapper.sh` for the `makeWrapper` implementation and in `nixpkgs/pkgs/build-support/setup-hooks/make-binary-wrapper/make-binary-wrapper.sh` for the `makeBinaryWrapper` implementation.
+
+`wrapProgram` is a convenience function you probably want to use most of the time, implemented by both `makeWrapper` and `makeBinaryWrapper`.
+
+Using the `makeBinaryWrapper` implementation is usually preferred, as it creates a tiny _compiled_ wrapper executable that can be used as a shebang interpreter. This is needed mostly on Darwin, where shebangs cannot point to scripts, [due to a limitation with the `execve` syscall](https://stackoverflow.com/questions/67100831/macos-shebang-with-absolute-path-not-working). Compiled wrappers generated by `makeBinaryWrapper` can be inspected with `less <path-to-wrapper>`: by scrolling past the binary data, you should be able to see the shell command that generated the executable and the environment variables that were injected into the wrapper.
+
+### `remove-references-to -t` \<storepath\> [ `-t` \<storepath\> ... ] \<file\> ... {#fun-remove-references-to}
+
+Removes the references of the specified files to the specified store files. This is done without changing the size of the file by replacing the hash by `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee`, and should work on compiled executables. This is meant to be used to remove the dependency of the output on inputs that are known to be unnecessary at runtime. Of course, reckless usage will break the patched programs.
+To use this, add `removeReferencesTo` to `nativeBuildInputs`.
+
+As `remove-references-to` is an actual executable and not a shell function, it can be used with `find`.
+Example removing all references to the compiler in the output:
+```nix
+{
+  postInstall = ''
+    find "$out" -type f -exec remove-references-to -t ${stdenv.cc} '{}' +
+  '';
+}
+```
+
+### `substitute` \<infile\> \<outfile\> \<subs\> {#fun-substitute}
+
+Performs string substitution on the contents of \<infile\>, writing the result to \<outfile\>. The substitutions in \<subs\> are of the following form:
+
+#### `--replace-fail` \<s1\> \<s2\> {#fun-substitute-replace-fail}
+
+Replace every occurrence of the string \<s1\> by \<s2\>.
+Will error if no change is made.
+
+#### `--replace-warn` \<s1\> \<s2\> {#fun-substitute-replace-warn}
+
+Replace every occurrence of the string \<s1\> by \<s2\>.
+Will print a warning if no change is made.
+
+#### `--replace-quiet` \<s1\> \<s2\> {#fun-substitute-replace-quiet}
+
+Replace every occurrence of the string \<s1\> by \<s2\>.
+Will do nothing if no change can be made.
+
+#### `--subst-var` \<varName\> {#fun-substitute-subst-var}
+
+Replace every occurrence of `@varName@` by the contents of the environment variable \<varName\>. This is useful for generating files from templates, using `@...@` in the template as placeholders.
+
+#### `--subst-var-by` \<varName\> \<s\> {#fun-substitute-subst-var-by}
+
+Replace every occurrence of `@varName@` by the string \<s\>.
+
+Example:
+
+```shell
+substitute ./foo.in ./foo.out \
+    --replace-fail /usr/bin/bar $bar/bin/bar \
+    --replace-fail "a string containing spaces" "some other text" \
+    --subst-var someVar
+```
+
+### `substituteInPlace` \<multiple files\> \<subs\> {#fun-substituteInPlace}
+
+Like `substitute`, but performs the substitutions in place on the files passed.
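+
+A common use is replacing hard-coded paths during `postPatch`; the file name, the path being replaced, and the `foo` dependency below are all illustrative:
+
+```nix
+{
+  postPatch = ''
+    substituteInPlace src/config.c \
+      --replace-fail "/usr/bin/foo" "${foo}/bin/foo"
+  '';
+}
+```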
+
+### `substituteAll` \<infile\> \<outfile\> {#fun-substituteAll}
+
+Replaces every occurrence of `@varName@`, where \<varName\> is any environment variable, in \<infile\>, writing the result to \<outfile\>. For instance, if \<infile\> has the contents
+
+```bash
+#! @bash@/bin/sh
+PATH=@coreutils@/bin
+echo @foo@
+```
+
+and the environment contains `bash=/nix/store/bmwp0q28cf21...-bash-3.2-p39` and `coreutils=/nix/store/68afga4khv0w...-coreutils-6.12`, but does not contain the variable `foo`, then the output will be
+
+```bash
+#! /nix/store/bmwp0q28cf21...-bash-3.2-p39/bin/sh
+PATH=/nix/store/68afga4khv0w...-coreutils-6.12/bin
+echo @foo@
+```
+
+That is, no substitution is performed for undefined variables.
+
+Environment variables that start with an uppercase letter or an underscore are filtered out, to prevent global variables (like `HOME`) or private variables (like `__ETC_PROFILE_DONE`) from accidentally getting substituted. The variables also have to be valid bash "names", as defined in the bash manpage (alphanumeric or `_`, must not start with a number).
+
+### `substituteAllInPlace` \<file\> {#fun-substituteAllInPlace}
+
+Like `substituteAll`, but performs the substitutions in place on the file \<file\>.
+
+### `stripHash` \<path\> {#fun-stripHash}
+
+Strips the directory and hash part of a store path, outputting the name part to `stdout`. For example:
+
+```bash
+# prints coreutils-8.24
+stripHash "/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
+```
+
+If you wish to store the result in another variable, then the following idiom may be useful:
+
+```bash
+name="/nix/store/9s9r019176g7cvn2nvcw41gsp862y6b4-coreutils-8.24"
+someVar=$(stripHash $name)
+```
+
+### `wrapProgram` \<executable\> \<makeWrapperArgs\> {#fun-wrapProgram}
+
+Convenience function for `makeWrapper` that replaces `<executable>` with a wrapper that executes the original program. It takes all the same arguments as `makeWrapper`, except for `--inherit-argv0` (used by the `makeBinaryWrapper` implementation) and `--argv0` (used by both `makeWrapper` and `makeBinaryWrapper` wrapper implementations).
+
+If you apply it multiple times, it will overwrite the wrapper file and you will end up with double wrapping, which should be avoided.
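+
+A minimal sketch of wrapping an installed program from `postFixup`, assuming `makeWrapper` is in `nativeBuildInputs` and using `hello` as a stand-in for an actual runtime dependency:
+
+```nix
+{
+  nativeBuildInputs = [ makeWrapper ];
+  postFixup = ''
+    wrapProgram $out/bin/foo \
+      --prefix PATH : ${lib.makeBinPath [ hello ]}
+  '';
+}
+```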
+
+### `prependToVar` \<variableName\> \<elements...\> {#fun-prependToVar}
+
+Prepend elements to a variable.
+
+Example:
+
+```shellSession
+$ configureFlags="--disable-static"
+$ prependToVar configureFlags --disable-dependency-tracking --enable-foo
+$ echo $configureFlags
+--disable-dependency-tracking --enable-foo --disable-static
+```
+
+### `appendToVar` \<variableName\> \<elements...\> {#fun-appendToVar}
+
+Append elements to a variable.
+
+Example:
+
+```shellSession
+$ configureFlags="--disable-static"
+$ appendToVar configureFlags --disable-dependency-tracking --enable-foo
+$ echo $configureFlags
+--disable-static --disable-dependency-tracking --enable-foo
+```
+
+## Package setup hooks {#ssec-setup-hooks}
+
+Nix itself considers a build-time dependency as merely something that should previously be built and accessible at build time—packages themselves are on their own to perform any additional setup. In most cases, that is fine, and the downstream derivation can deal with its own dependencies. But for a few common tasks, that would result in almost every package doing the same sort of setup work—depending not on the package itself, but entirely on which dependencies were used.
+
+In order to alleviate this burden, the setup hook mechanism was written, where any package can include a shell script that \[by convention rather than enforcement by Nix\] any downstream reverse-dependency will source as part of its build process. That allows the downstream dependency to merely specify its dependencies, and lets those dependencies effectively initialize themselves. No boilerplate mirroring the list of dependencies is needed.
+
+The setup hook mechanism is a bit of a sledgehammer though: a powerful feature with a broad and indiscriminate area of effect. The combination of its power and implicit use may be expedient, but isn’t without costs. Nix itself is unchanged, but the spirit of added dependencies being effect-free is violated even if the latter isn’t. For example, if a derivation path is mentioned more than once, Nix itself doesn’t care and makes sure the dependency derivation is already built just the same—depending is just needing something to exist, and needing is idempotent. However, a dependency specified twice will have its setup hook run twice, and that could easily change the build environment (though a well-written setup hook will therefore strive to be idempotent so this is in fact not observable). More broadly, setup hooks are anti-modular in that multiple dependencies, whether the same or different, should not interfere and yet their setup hooks may well do so.
+
+The most typical use of the setup hook is actually to add other hooks which are then run (i.e. after all the setup hooks) on each dependency. For example, the C compiler wrapper’s setup hook feeds itself flags for each dependency that contains relevant libraries and headers. This is done by defining a bash function, and appending its name to one of `envBuildBuildHooks`, `envBuildHostHooks`, `envBuildTargetHooks`, `envHostHostHooks`, `envHostTargetHooks`, or `envTargetTargetHooks`. These 6 bash variables correspond to the 6 sorts of dependencies by platform (there’s 12 total but we ignore the propagated/non-propagated axis).
+
+Packages adding a hook should not hard code a specific hook, but rather choose a variable *relative* to how they are included. Returning to the C compiler wrapper example, if the wrapper itself is an `n` dependency, then it only wants to accumulate flags from `n + 1` dependencies, as only those ones match the compiler’s target platform. The `hostOffset` variable is defined with the current dependency’s host offset, and `targetOffset` with its target offset, before its setup hook is sourced. Additionally, since most environment hooks don’t care about the target platform, that means the setup hook can append to the right bash array by doing something like
+
+```bash
+addEnvHooks "$hostOffset" myBashFunction
+```
+
+The *existence* of setup hooks has long been documented and packages inside Nixpkgs are free to use this mechanism. Other packages, however, should not rely on these mechanisms not changing between Nixpkgs versions. Because of the existing issues with this system, there’s little benefit from mandating it be stable for any period of time.
+
+First, let’s cover some setup hooks that are part of Nixpkgs default `stdenv`. This means that they are run for every package built using `stdenv.mkDerivation` or when using a custom builder that has `source $stdenv/setup`. Some of these are platform specific, so they may run on Linux but not Darwin or vice-versa.
+
+### `move-docs.sh` {#move-docs.sh}
+
+This setup hook moves any installed documentation to the `share/` subdirectory. This includes the man, doc and info directories. This is needed for legacy programs that do not know how to use the `share` subdirectory.
+
+### `compress-man-pages.sh` {#compress-man-pages.sh}
+
+This setup hook compresses any man pages that have been installed. The compression is done using the gzip program. This helps to reduce the installed size of packages.
+
+### `strip.sh` {#strip.sh}
+
+This runs the strip command on installed binaries and libraries. This removes unnecessary information like debug symbols when they are not needed. This also helps to reduce the installed size of packages.
+
+### `patch-shebangs.sh` {#patch-shebangs.sh}
+
+This setup hook patches installed scripts to add Nix store paths to their shebang interpreter as found in the build environment. The [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) line tells a Unix-like operating system which interpreter to use to execute the script's contents.
+
+::: {.note}
+The [generic builder][generic-builder] populates `PATH` from inputs of the derivation.
+:::
+
+[generic-builder]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/stdenv/generic/builder.sh
+
+#### Invocation {#patch-shebangs.sh-invocation}
+
+Multiple paths can be specified.
+
+```
+patchShebangs [--build | --host] PATH...
+```
+
+##### Flags {#patch-shebangs.sh-invocation-flags}
+
+`--build`
+: Look up commands available at build time
+
+`--host`
+: Look up commands available at run time
+
+##### Examples {#patch-shebangs.sh-invocation-examples}
+
+```sh
+patchShebangs --host /nix/store/<hash>-hello-1.0/bin
+```
+
+```sh
+patchShebangs --build configure
+```
+
+`#!/bin/sh` will be rewritten to `#!/nix/store/<hash>-some-bash/bin/sh`.
+
+`#!/usr/bin/env` gets special treatment: `#!/usr/bin/env python` is rewritten to `/nix/store/<hash>/bin/python`.
+
+Interpreter paths that point to a valid Nix store location are not changed.
+
+::: {.note}
+A script file must be marked as executable, otherwise it will not be
+considered.
+:::
+
+This mechanism ensures that the interpreter for a given script is always found and is exactly the one specified by the build.
+
+It can be disabled by setting [`dontPatchShebangs`](#var-stdenv-dontPatchShebangs):
+
+```nix
+stdenv.mkDerivation {
+  # ...
+  dontPatchShebangs = true;
+  # ...
+}
+```
+
+The file [`patch-shebangs.sh`][patch-shebangs.sh] defines the [`patchShebangs`][patchShebangs] function. It is used to implement [`patchShebangsAuto`][patchShebangsAuto], the [setup hook](#ssec-setup-hooks) that is registered to run during the [fixup phase](#ssec-fixup-phase) by default.
+
+If you need to run `patchShebangs` at build time, it must be called explicitly within [one of the build phases](#sec-stdenv-phases).
+
+[patch-shebangs.sh]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh
+[patchShebangs]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh#L24-L105
+[patchShebangsAuto]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh#L107-L119
+
+### `audit-tmpdir.sh` {#audit-tmpdir.sh}
+
+This verifies that no references are left from the installed binaries to the directory used to build those binaries. This ensures that the binaries do not need things outside the Nix store. This is currently supported on Linux only.
+
+### `multiple-outputs.sh` {#multiple-outputs.sh}
+
+This setup hook adds configure flags that tell packages to install files into any one of the proper outputs listed in `outputs`. This behavior can be turned off by setting `setOutputFlags` to false in the derivation environment. See [](#chap-multiple-output) for more information.
+
+### `move-sbin.sh` {#move-sbin.sh}
+
+This setup hook moves any binaries installed in the `sbin/` subdirectory into `bin/`. In addition, a link is provided from `sbin/` to `bin/` for compatibility.
+
+### `move-lib64.sh` {#move-lib64.sh}
+
+This setup hook moves any libraries installed in the `lib64/` subdirectory into `lib/`. In addition, a link is provided from `lib64/` to `lib/` for compatibility.
+
+### `move-systemd-user-units.sh` {#move-systemd-user-units.sh}
+
+This setup hook moves any systemd user units installed in the `lib/` subdirectory into `share/`. In addition, a link is provided from `share/` to `lib/` for compatibility. This is needed for systemd to find user services when installed into the user profile.
+
+This hook only runs when compiling for Linux.
+
+### `set-source-date-epoch-to-latest.sh` {#set-source-date-epoch-to-latest.sh}
+
+This sets `SOURCE_DATE_EPOCH` to the modification time of the most recent file.
+
+### Bintools Wrapper and hook {#bintools-wrapper}
+
+The Bintools Wrapper wraps the binary utilities for a bunch of miscellaneous purposes. These are GNU Binutils when targeting Linux, and a mix of cctools and GNU binutils for Darwin. \[The “Bintools” name is supposed to be a compromise between “Binutils” and “cctools” not denoting any specific implementation.\] Specifically, the underlying bintools package, and a C standard library (glibc or Darwin’s libSystem, just for the dynamic loader) are all fed in, and dependency finding, hardening (see below), and purity checks for each are handled by the Bintools Wrapper. Packages typically depend on CC Wrapper, which in turn (at run time) depends on the Bintools Wrapper.
+
+The Bintools Wrapper was only just recently split off from CC Wrapper, so the division of labor is still being worked out. For example, it shouldn’t care about the C standard library, but just take a derivation with the dynamic loader (which happens to be the glibc on linux). Dependency finding however is a task both wrappers will continue to need to share, and probably the most important to understand. It is currently accomplished by collecting directories of host-platform dependencies (i.e. `buildInputs` and `nativeBuildInputs`) in environment variables. The Bintools Wrapper’s setup hook causes any `lib` and `lib64` subdirectories to be added to `NIX_LDFLAGS`. Since the CC Wrapper and the Bintools Wrapper use the same strategy, most of the Bintools Wrapper code is sparsely commented and refers to the CC Wrapper. But the CC Wrapper’s code, by contrast, has quite lengthy comments. The Bintools Wrapper merely cites those, rather than repeating them, to avoid falling out of sync.
+
+A final task of the setup hook is defining a number of standard environment variables to tell build systems which executables fulfill which purpose. They are defined to just be the base name of the tools, under the assumption that the Bintools Wrapper’s binaries will be on the path. Firstly, this helps poorly-written packages, e.g. ones that look for just `gcc` when `CC` isn’t defined yet `clang` is to be used. Secondly, this helps packages not get confused when cross-compiling, in which case multiple Bintools Wrappers may simultaneously be in use. [^footnote-stdenv-per-platform-wrapper] `BUILD_`- and `TARGET_`-prefixed versions of the normal environment variable are defined for additional Bintools Wrappers, properly disambiguating them.
+
+A problem with this final task is that the Bintools Wrapper is honest and defines `LD` as `ld`. Most packages, however, firstly use the C compiler for linking, secondly use `LD` anyways, defining it as the C compiler, and thirdly, only so define `LD` when it is undefined as a fallback. This triple-threat means the Bintools Wrapper will break those packages, as `LD` is already defined as the actual linker, which the package won’t override yet doesn’t want to use. The workaround is to define, just for the problematic package, `LD` as the C compiler. A good way to do this would be `preConfigure = "LD=$CC"`.
+
+### CC Wrapper and hook {#cc-wrapper}
+
+The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes. Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C standard library (glibc or Darwin’s libSystem, just for the dynamic loader) are all fed in, and dependency finding, hardening (see below), and purity checks for each are handled by the CC Wrapper. Packages typically depend on the CC Wrapper, which in turn (at run-time) depends on the Bintools Wrapper.
+
+Dependency finding is undoubtedly the main task of the CC Wrapper. This works just like the Bintools Wrapper, except that any `include` subdirectory of any relevant dependency is added to `NIX_CFLAGS_COMPILE`. The setup hook itself contains elaborate comments describing the exact mechanism by which this is accomplished.
+
+Similarly, the CC Wrapper follows the Bintools Wrapper in defining standard environment variables with the names of the tools it wraps, for the same reasons described above. Importantly, while it includes a `cc` symlink to the C compiler for portability, `CC` will be defined using the compiler’s “real name” (i.e. `gcc` or `clang`). This helps lousy build systems that inspect the name of the compiler rather than run it.
+
+Here are some more packages that provide a setup hook. Since the list of hooks is extensible, this is not an exhaustive list; then again, since the mechanism is only to be used as a last resort, it may well be close to one.
+
+### Other hooks {#stdenv-other-hooks}
+
+Many other packages provide hooks that are not part of `stdenv`. You can find
+these in the [Hooks Reference](#chap-hooks).
+
+### Compiler and Linker wrapper hooks {#compiler-linker-wrapper-hooks}
+
+If the file `${cc}/nix-support/cc-wrapper-hook` exists, it will be run at the end of the [compiler wrapper](#cc-wrapper).
+If the file `${binutils}/nix-support/post-link-hook` exists, it will be run at the end of the linker wrapper.
+These hooks allow a user to inject code into the wrappers.
+As an example, these hooks can be used to extract `extraBefore`, `params` and `extraAfter` which store all the command line arguments passed to the compiler and linker respectively.
+
+## Purity in Nixpkgs {#sec-purity-in-nixpkgs}
+
+*Measures taken to prevent dependencies on packages outside the store, and what you can do to prevent them.*
+
+GCC doesn’t search in locations such as `/usr/include`. In fact, attempts to add such directories through the `-I` flag are filtered out. Likewise, the linker (from GNU binutils) doesn’t search in standard locations such as `/usr/lib`. Programs built on Linux are linked against a GNU C Library that likewise doesn’t search in the default system locations.
+
+## Hardening in Nixpkgs {#sec-hardening-in-nixpkgs}
+
+There are flags available to harden packages at compile or link-time. These can be toggled using the `stdenv.mkDerivation` parameters `hardeningDisable` and `hardeningEnable`.
+
+Both parameters take a list of flags as strings. The special `"all"` flag can be passed to `hardeningDisable` to turn off all hardening. These flags can also be used as environment variables for testing or development purposes.
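+
+For example, a package that fails to build with `-Werror=format-security` could disable just that flag, while a network-facing service might opt into `pie`; the selection is illustrative:
+
+```nix
+{
+  hardeningDisable = [ "format" ];
+  hardeningEnable = [ "pie" ];
+}
+```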
+
+For more in-depth information on these hardening flags and hardening in general, refer to the [Debian Wiki](https://wiki.debian.org/Hardening), [Ubuntu Wiki](https://wiki.ubuntu.com/Security/Features), [Gentoo Wiki](https://wiki.gentoo.org/wiki/Project:Hardened), and the [Arch Wiki](https://wiki.archlinux.org/title/Security).
+
+### Hardening flags enabled by default {#sec-hardening-flags-enabled-by-default}
+
+The following flags are enabled by default and might require disabling with `hardeningDisable` if the program to package is incompatible.
+
+#### `format` {#format}
+
+Adds the `-Wformat -Wformat-security -Werror=format-security` compiler options. At present, this warns about calls to `printf` and `scanf` functions where the format string is not a string literal and there are no format arguments, as in `printf(foo);`. This may be a security hole if the format string came from untrusted input and contains `%n`.
+
+This needs to be turned off or fixed for errors similar to:
+
+```
+/tmp/nix-build-zynaddsubfx-2.5.2.drv-0/zynaddsubfx-2.5.2/src/UI/guimain.cpp:571:28: error: format not a string literal and no format arguments [-Werror=format-security]
+         printf(help_message);
+                            ^
+cc1plus: some warnings being treated as errors
+```
+
+#### `stackprotector` {#stackprotector}
+
+Adds the `-fstack-protector-strong --param ssp-buffer-size=4` compiler options. This adds safety checks against stack overwrites rendering many potential code injection attacks into aborting situations. In the best case this turns code injection vulnerabilities into denial of service or into non-issues (depending on the application).
+
+This needs to be turned off or fixed for errors similar to:
+
+```
+bin/blib.a(bios_console.o): In function `bios_handle_cup':
+/tmp/nix-build-ipxe-20141124-5cbdc41.drv-0/ipxe-5cbdc41/src/arch/i386/firmware/pcbios/bios_console.c:86: undefined reference to `__stack_chk_fail'
+```
+
+#### `fortify` {#fortify}
+
+Adds the `-O2 -D_FORTIFY_SOURCE=2` compiler options. During code generation the compiler knows a great deal of information about buffer sizes (where possible), and attempts to replace insecure unlimited length buffer function calls with length-limited ones. This is especially useful for old, crufty code. Additionally, format strings in writable memory that contain `%n` are blocked. If an application depends on such a format string, it will need to be worked around.
+
+Additionally, some warnings are enabled which might trigger build failures if compiler warnings are treated as errors in the package build. In this case, set `env.NIX_CFLAGS_COMPILE` to `-Wno-error=warning-type`.
+
+This needs to be turned off or fixed for errors similar to:
+
+```
+malloc.c:404:15: error: return type is an incomplete type
+malloc.c:410:19: error: storage size of 'ms' isn't known
+
+strdup.h:22:1: error: expected identifier or '(' before '__extension__'
+
+strsep.c:65:23: error: register name not specified for 'delim'
+
+installwatch.c:3751:5: error: conflicting types for '__open_2'
+
+fcntl2.h:50:4: error: call to '__open_missing_mode' declared with attribute error: open with O_CREAT or O_TMPFILE in second argument needs 3 arguments
+```
+
+#### `pic` {#pic}
+
+Adds the `-fPIC` compiler option. This option adds support for position-independent code in shared libraries, thus making ASLR possible.
+
+Most notably, the Linux kernel, kernel modules and other code not running in an operating system environment, like boot loaders, won’t build with PIC enabled. In most cases, the compiler will complain that PIC is not supported for a specific build.
+
+This needs to be turned off or fixed for assembler errors similar to:
+
+```
+ccbLfRgg.s: Assembler messages:
+ccbLfRgg.s:33: Error: missing or invalid displacement expression `private_key_len@GOTOFF'
+```
+
+#### `strictoverflow` {#strictoverflow}
+
+Signed integer overflow is undefined behaviour according to the C standard. If it happens, it is an error in the program as it should check for overflow before it can happen, not afterwards. GCC provides built-in functions to perform arithmetic with overflow checking, which are correct and faster than any custom implementation. As a workaround, the option `-fno-strict-overflow` makes gcc behave as if signed integer overflows were defined.
+
+This flag should not trigger any build or runtime errors.
+
+#### `relro` {#relro}
+
+Adds the `-z relro` linker option. During program load, several ELF memory sections need to be written to by the linker, but can be turned read-only before turning over control to the program. This prevents some GOT (and .dtors) overwrite attacks, but at least the part of the GOT used by the dynamic linker (.got.plt) is still vulnerable.
+
+This flag can break dynamic shared object loading. For instance, the module systems of Xorg and OpenCV are incompatible with this flag. In almost all cases the `bindnow` flag must also be disabled and incompatible programs typically fail with similar errors at runtime.
+
+#### `bindnow` {#bindnow}
+
+Adds the `-z now` linker option. During program load, all dynamic symbols are resolved, allowing for the complete GOT to be marked read-only (due to `relro`). This prevents GOT overwrite attacks. For very large applications, this can incur some performance loss during initial load while symbols are resolved, but this shouldn’t be an issue for daemons.
+
+This flag can break dynamic shared object loading. For instance, the module systems of Xorg and PHP are incompatible with this flag. Programs incompatible with this flag often fail at runtime due to missing symbols, like:
+
+```
+intel_drv.so: undefined symbol: vgaHWFreeHWRec
+```
+
+### Hardening flags disabled by default {#sec-hardening-flags-disabled-by-default}
+
+The following flags are disabled by default and should be enabled with `hardeningEnable` for packages that take untrusted input like network services.
+
+#### `pie` {#pie}
+
+This flag is disabled by default for normal `glibc` based NixOS package builds, but enabled by default for `musl` based package builds.
+
+Adds the `-fPIE` compiler and `-pie` linker options. Position Independent Executables are needed to take advantage of Address Space Layout Randomization, supported by modern kernel versions. While ASLR can already be enforced for data areas in the stack and heap (brk and mmap), the code areas must be compiled as position-independent. Shared libraries already do this with the `pic` flag, so they gain ASLR automatically, but binary .text regions need to be built with `pie` to gain ASLR. When this happens, ROP attacks are much harder since there are no static locations to bounce off of during a memory corruption attack.
+
+Static libraries need to be compiled with `-fPIE` so that executables can link them in with the `-pie` linker option.
+If the libraries lack `-fPIE`, you will get the error `recompile with -fPIE`.
+
+[^footnote-stdenv-ignored-build-platform]: The build platform is ignored because it is a mere implementation detail of the package satisfying the dependency: As a general programming principle, dependencies are always *specified* as interfaces, not concrete implementation.
+[^footnote-stdenv-native-dependencies-in-path]: Currently, this means for native builds all dependencies are put on the `PATH`. But in the future that may not be the case for sake of matching cross: the platforms would be assumed to be unique for native and cross builds alike, so only the `depsBuild*` and `nativeBuildInputs` would be added to the `PATH`.
+[^footnote-stdenv-propagated-dependencies]: Nix itself already takes a package’s transitive dependencies into account, but this propagation ensures nixpkgs-specific infrastructure like [setup hooks](#ssec-setup-hooks) also are run as if it were a propagated dependency.
+[^footnote-stdenv-find-inputs-location]: The `findInputs` function, currently residing in `pkgs/stdenv/generic/setup.sh`, implements the propagation logic.
+[^footnote-stdenv-sys-lib-search-path]: It clears the `sys_lib_*search_path` variables in the Libtool script to prevent Libtool from using libraries in `/usr/lib` and such.
+[^footnote-stdenv-build-time-guessing-impurity]: Eventually these will be passed building natively as well, to improve determinism: build-time guessing, as is done today, is a risk of impurity.
+[^footnote-stdenv-per-platform-wrapper]: Each wrapper targets a single platform, so if binaries for multiple platforms are needed, the underlying binaries must be wrapped multiple times. As this is a property of the wrapper itself, the multiple wrappings are needed whether or not the same underlying binaries can target multiple platforms.
diff --git a/nixpkgs/doc/style.css b/nixpkgs/doc/style.css
new file mode 100644
index 000000000000..fddf4f4823c7
--- /dev/null
+++ b/nixpkgs/doc/style.css
@@ -0,0 +1,416 @@
+html {
+    line-height: 1.15;
+    -webkit-text-size-adjust: 100%;
+}
+
+body {
+    margin: 0;
+}
+
+.book {
+    margin: auto;
+    width: 100%;
+}
+
+@media screen and (min-width: 768px) {
+    .book {
+        max-width: 46rem;
+    }
+}
+
+@media screen and (min-width: 992px) {
+    .book {
+        max-width: 60rem;
+    }
+}
+
+@media screen and (min-width: 1200px) {
+    .book {
+        max-width: 73rem;
+    }
+}
+
+.book .list-of-examples {
+    display: none;
+}
+
+h1 {
+    font-size: 2em;
+    margin: 0.67em 0;
+}
+
+hr {
+    box-sizing: content-box;
+    height: 0;
+    overflow: visible;
+}
+
+pre {
+    font-family: monospace, monospace;
+    font-size: 1em;
+}
+
+a {
+    background-color: transparent;
+}
+
+strong {
+    font-weight: bolder;
+}
+
+code {
+    font-family: monospace, monospace;
+    font-size: 1em;
+}
+
+sup {
+    font-size: 75%;
+    line-height: 0;
+    position: relative;
+    vertical-align: baseline;
+}
+
+sup {
+    top: -0.5em;
+}
+
+::-webkit-file-upload-button {
+    -webkit-appearance: button;
+    font: inherit;
+}
+
+pre {
+    overflow: auto;
+}
+
+*,
+*::before,
+*::after {
+    box-sizing: border-box;
+}
+
+html {
+    font-size: 100%;
+    line-height: 1.77777778;
+}
+
+@media screen and (min-width: 4000px) {
+    html {
+        background: #000;
+    }
+
+    html body {
+        margin: auto;
+        max-width: 250rem;
+    }
+}
+
+@media screen and (max-width: 320px) {
+    html {
+        font-size: calc(16 / 320 * 100vw);
+    }
+}
+
+body {
+    font-size: 1rem;
+    font-family: 'Roboto', sans-serif;
+    font-weight: 300;
+    color: #000000;
+    background-color: #ffffff;
+    min-height: 100vh;
+    display: flex;
+    flex-direction: column;
+}
+
+@media screen and (max-width: 767.9px) {
+    body {
+        padding-left: 1rem;
+        padding-right: 1rem;
+    }
+}
+
+a {
+    text-decoration: none;
+    border-bottom: 1px solid;
+    color: #405d99;
+}
+
+ul {
+    padding: 0;
+    margin-top: 0;
+    margin-right: 0;
+    margin-bottom: 1rem;
+    margin-left: 1rem;
+}
+
+table {
+    border-collapse: collapse;
+    width: 100%;
+    margin-bottom: 1rem;
+}
+
+thead th {
+    text-align: left;
+}
+
+hr {
+    margin-top: 1rem;
+    margin-bottom: 1rem;
+}
+
+h1 {
+    font-weight: 800;
+    line-height: 110%;
+    font-size: 200%;
+    margin-bottom: 1rem;
+    color: #6586c8;
+}
+
+h2 {
+    font-weight: 800;
+    line-height: 110%;
+    font-size: 170%;
+    margin-bottom: 0.625rem;
+    color: #6586c8;
+}
+
+h2:not(:first-child) {
+    margin-top: 1rem;
+}
+
+h3 {
+    font-weight: 800;
+    line-height: 110%;
+    margin-bottom: 1rem;
+    font-size: 150%;
+    color: #6586c8;
+}
+
+.note h3,
+.tip h3,
+.warning h3,
+.caution h3,
+.important h3 {
+    font-size: 120%;
+}
+
+h4 {
+    font-weight: 800;
+    line-height: 110%;
+    margin-bottom: 1rem;
+    font-size: 140%;
+    color: #6586c8;
+}
+
+h5 {
+    font-weight: 800;
+    line-height: 110%;
+    margin-bottom: 1rem;
+    font-size: 130%;
+    color: #6a6a6a;
+}
+
+h6 {
+    font-weight: 800;
+    line-height: 110%;
+    margin-bottom: 1rem;
+    font-size: 120%
+}
+
+strong {
+    font-weight: bold;
+}
+
+p {
+    margin-top: 0;
+    margin-bottom: 1rem;
+}
+
+dt>*:first-child,
+dd>*:first-child {
+    margin-top: 0;
+}
+
+dt>*:last-child,
+dd>*:last-child {
+    margin-bottom: 0;
+}
+
+pre,
+code {
+    font-family: monospace;
+}
+
+code {
+    color: #ff8657;
+    background: #f4f4f4;
+    display: inline-block;
+    padding: 0 0.5rem;
+    border: 1px solid #d8d8d8;
+    border-radius: 0.5rem;
+    line-height: 1.57777778;
+}
+
+div.book .programlisting,
+div.appendix .programlisting {
+    border-radius: 0.5rem;
+    padding: 1rem;
+    overflow: auto;
+    background: #f2f8fd;
+    color: #000000;
+}
+
+div.book .note,
+div.book .tip,
+div.book .warning,
+div.book .caution,
+div.book .important,
+div.appendix .note,
+div.appendix .tip,
+div.appendix .warning,
+div.appendix .caution,
+div.appendix .important {
+    margin-bottom: 1rem;
+    border-radius: 0.5rem;
+    padding: 1.5rem;
+    overflow: auto;
+    background: #f4f4f4;
+}
+
+div.book .note>.title,
+div.book .tip>.title,
+div.book .warning>.title,
+div.book .caution>.title,
+div.book .important>.title,
+div.appendix .note>.title,
+div.appendix .tip>.title,
+div.appendix .warning>.title,
+div.appendix .caution>.title,
+div.appendix .important>.title {
+    font-weight: 800;
+    /* font-family: 'Overpass', serif; */
+    line-height: 110%;
+    margin-bottom: 1rem;
+    color: inherit;
+    margin-bottom: 0;
+}
+
+div.book .note> :first-child,
+div.book .tip> :first-child,
+div.book .warning> :first-child,
+div.book .caution> :first-child,
+div.book .important> :first-child,
+div.appendix .note> :first-child,
+div.appendix .tip> :first-child,
+div.appendix .warning> :first-child,
+div.appendix .caution> :first-child,
+div.appendix .important> :first-child {
+    margin-top: 0;
+}
+
+div.book .note> :last-child,
+div.book .tip> :last-child,
+div.book .warning> :last-child,
+div.book .caution> :last-child,
+div.book .important> :last-child,
+div.appendix .note> :last-child,
+div.appendix .tip> :last-child,
+div.appendix .warning> :last-child,
+div.appendix .caution> :last-child,
+div.appendix .important> :last-child {
+    margin-bottom: 0;
+}
+
+div.book .note,
+div.book .tip,
+div.appendix .note,
+div.appendix .tip {
+    color: #5277c3;
+    background: #f2f8fd;
+}
+
+div.book .warning,
+div.book .caution,
+div.appendix .warning,
+div.appendix .caution {
+    color: #cc3900;
+    background-color: #fff5e1;
+}
+
+div.book .section,
+div.appendix .section {
+    margin-top: 2em;
+}
+
+div.book div.example,
+div.appendix div.example {
+    margin-top: 1.5em;
+}
+
+div.book br.example-break,
+div.appendix br.example-break {
+    display: none;
+}
+
+div.book div.footnotes>hr,
+div.appendix div.footnotes>hr {
+    border-color: #d8d8d8;
+}
+
+div.book div.footnotes>br,
+div.appendix div.footnotes>br {
+    display: none;
+}
+
+div.book dt,
+div.appendix dt {
+    margin-top: 1em;
+}
+
+div.book .toc dt,
+div.appendix .toc dt {
+    margin-top: 0;
+}
+
+div.book .list-of-examples dt,
+div.appendix .list-of-examples dt {
+    margin-top: 0;
+}
+
+div.book code,
+div.appendix code {
+    padding: 0;
+    border: 0;
+    background-color: inherit;
+    color: inherit;
+    font-size: 100%;
+    -webkit-hyphens: none;
+    -moz-hyphens: none;
+    hyphens: none;
+}
+
+div.book div.toc,
+div.appendix div.toc {
+    margin-bottom: 3em;
+    border-bottom: 0.0625rem solid #d8d8d8;
+}
+
+div.book div.toc dd,
+div.appendix div.toc dd {
+    margin-left: 2em;
+}
+
+div.book span.command,
+div.appendix span.command {
+    font-family: monospace;
+    -webkit-hyphens: none;
+    -moz-hyphens: none;
+    hyphens: none;
+}
+
+div.book .informaltable th,
+div.book .informaltable td,
+div.appendix .informaltable th,
+div.appendix .informaltable td {
+    padding: 0.5rem;
+}
diff --git a/nixpkgs/doc/tests/manpage-urls.py b/nixpkgs/doc/tests/manpage-urls.py
new file mode 100755
index 000000000000..a1ea6d27969e
--- /dev/null
+++ b/nixpkgs/doc/tests/manpage-urls.py
@@ -0,0 +1,109 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i "python3 -I" -p "python3.withPackages(p: with p; [ aiohttp rich structlog ])"
+
+from argparse import ArgumentParser, Namespace
+from collections import defaultdict
+from collections.abc import Mapping, Sequence
+from enum import IntEnum
+from http import HTTPStatus
+from pathlib import Path
+from typing import Optional
+import asyncio, json, logging, os  # os is used for the LOG_LEVEL default in parse_args()
+
+import aiohttp, structlog
+from structlog.contextvars import bound_contextvars as log_context
+
+
+LogLevel = IntEnum('LogLevel', {
+    lvl: getattr(logging, lvl)
+    for lvl in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
+})
+LogLevel.__str__ = lambda self: self.name
+
+
+EXPECTED_STATUS=frozenset((
+    HTTPStatus.OK, HTTPStatus.FOUND,
+    HTTPStatus.NOT_FOUND,
+))
+
+async def check(session: aiohttp.ClientSession, manpage: str, url: str) -> HTTPStatus:
+    with log_context(manpage=manpage, url=url):
+        logger.debug("Checking")
+        async with session.head(url) as resp:
+            st = HTTPStatus(resp.status)
+            match st:
+                case HTTPStatus.OK | HTTPStatus.FOUND:
+                    logger.debug("OK!")
+                case HTTPStatus.NOT_FOUND:
+                    logger.error("Broken link!")
+                case _ if st < 400:
+                    logger.info("Unexpected code", status=st)
+                case _ if 400 <= st < 600:
+                    logger.warn("Unexpected error", status=st)
+
+            return st
+
+async def main(urls_path: Path) -> Mapping[HTTPStatus, int]:
+    logger.info(f"Parsing {urls_path}")
+    with urls_path.open() as urls_file:
+        urls = json.load(urls_file)
+
+    count: defaultdict[HTTPStatus, int] = defaultdict(lambda: 0)
+
+    logger.info(f"Checking URLs from {urls_path}")
+    async with aiohttp.ClientSession() as session:
+        for status in asyncio.as_completed([
+            check(session, manpage, url)
+            for manpage, url in urls.items()
+        ]):
+            count[await status]+=1
+
+    ok = count[HTTPStatus.OK] + count[HTTPStatus.FOUND]
+    broken = count[HTTPStatus.NOT_FOUND]
+    unknown = sum(c for st, c in count.items() if st not in EXPECTED_STATUS)
+    logger.info(f"Done: {broken} broken links, "
+                f"{ok} correct links, and {unknown} unexpected status")
+
+    return count
+
+
+def parse_args(args: Optional[Sequence[str]] = None) -> Namespace:
+    parser = ArgumentParser(
+        prog = 'check-manpage-urls',
+        description = 'Check the validity of the manpage URLs linked in the nixpkgs manual',
+    )
+    parser.add_argument(
+        '-l', '--log-level',
+        default = os.getenv('LOG_LEVEL', 'INFO'),
+        type = lambda s: LogLevel[s],
+        choices = list(LogLevel),
+    )
+    parser.add_argument(
+        'file',
+        type = Path,
+        nargs = '?',
+    )
+
+    return parser.parse_args(args)
+
+
+if __name__ == "__main__":
+    import sys
+
+    args = parse_args()
+
+    structlog.configure(
+        wrapper_class=structlog.make_filtering_bound_logger(args.log_level),
+    )
+    logger = structlog.getLogger("check-manpage-urls.py")
+
+    urls_path = args.file
+    if urls_path is None:
+        REPO_ROOT = Path(__file__).parent.parent.parent.parent
+        logger.info(f"Assuming we are in a nixpkgs repo rooted at {REPO_ROOT}")
+
+        urls_path = REPO_ROOT / 'doc' / 'manpage-urls.json'
+
+    count = asyncio.run(main(urls_path))
+
+    sys.exit(0 if count[HTTPStatus.NOT_FOUND] == 0 else 1)
diff --git a/nixpkgs/doc/using-nixpkgs.md b/nixpkgs/doc/using-nixpkgs.md
new file mode 100644
index 000000000000..f850b2e83c28
--- /dev/null
+++ b/nixpkgs/doc/using-nixpkgs.md
@@ -0,0 +1,8 @@
+# Using Nixpkgs {#part-using}
+
+```{=include=} chapters
+using/platform-support.chapter.md
+using/configuration.chapter.md
+using/overlays.chapter.md
+using/overrides.chapter.md
+```
diff --git a/nixpkgs/doc/using/configuration.chapter.md b/nixpkgs/doc/using/configuration.chapter.md
new file mode 100644
index 000000000000..252d255de829
--- /dev/null
+++ b/nixpkgs/doc/using/configuration.chapter.md
@@ -0,0 +1,372 @@
+# Global configuration {#chap-packageconfig}
+
+Nix comes with certain defaults about what packages can and cannot be installed, based on a package's metadata. By default, Nix will prevent installation if any of the following criteria are true:
+
+-   The package is thought to be broken, and has had its `meta.broken` set to `true`.
+
+-   The package isn't intended to run on the given system, as none of its `meta.platforms` match the given system.
+
+-   The package's `meta.license` is set to a license which is considered to be unfree.
+
+-   The package has known security vulnerabilities but has not been or cannot be updated for some reason, and a list of issues has been entered into the package's `meta.knownVulnerabilities`.
+
+Note that all this is checked during evaluation already, and the check includes any package that is evaluated. In particular, all build-time dependencies are checked. `nix-env -qa` will (attempt to) hide any packages that would be refused.
+
+Each of these criteria can be altered in the nixpkgs configuration.
+
+The nixpkgs configuration for a NixOS system is set in its `configuration.nix`, as in the following example:
+
+```nix
+{
+  nixpkgs.config = {
+    allowUnfree = true;
+  };
+}
+```
+
+However, this does not allow unfree software for individual users. Their configurations are managed separately.
+
+A user's nixpkgs configuration is stored in a user-specific configuration file located at `~/.config/nixpkgs/config.nix`. For example:
+
+```nix
+{
+  allowUnfree = true;
+}
+```
+
+Note that we are not able to test or build unfree software on Hydra due to policy. Most unfree licenses prohibit us from either executing or distributing the software.
+
+## Installing broken packages {#sec-allow-broken}
+
+There are two ways to try compiling a package which has been marked as broken.
+
+-   For allowing the build of a broken package once, you can use an environment variable for a single invocation of the nix tools:
+
+    ```ShellSession
+    $ export NIXPKGS_ALLOW_BROKEN=1
+    ```
+
+-   For permanently allowing broken packages to be built, you may add `allowBroken = true;` to your user's configuration file, like this:
+
+    ```nix
+    {
+      allowBroken = true;
+    }
+    ```
+
+
+## Installing packages on unsupported systems {#sec-allow-unsupported-system}
+
+There are also two ways to try compiling a package which has been marked as unsupported for the given system.
+
+-   For allowing the build of an unsupported package once, you can use an environment variable for a single invocation of the nix tools:
+
+    ```ShellSession
+    $ export NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1
+    ```
+
+-   For permanently allowing unsupported packages to be built, you may add `allowUnsupportedSystem = true;` to your user's configuration file, like this:
+
+    ```nix
+    {
+      allowUnsupportedSystem = true;
+    }
+    ```
+
+The difference between a package being unsupported on some system and being broken is admittedly a bit fuzzy. If a program *ought* to work on a certain platform, but doesn't, the platform should be included in `meta.platforms`, but marked as broken with e.g. `meta.broken = !hostPlatform.isWindows`. Of course, this raises the question of what "ought" means exactly. That is left to the package maintainer.
+
+## Installing unfree packages {#sec-allow-unfree}
+
+All users of Nixpkgs are free software users, and many users (and developers) of Nixpkgs want to limit and tightly control their exposure to unfree software.
+At the same time, many users need (or want) to run some specific pieces of proprietary software.
+Nixpkgs includes some expressions for unfree software packages.
+By default unfree software cannot be installed and doesn’t show up in searches.
+
+There are several ways to tweak how Nix handles a package which has been marked as unfree.
+
+-   To temporarily allow all unfree packages, you can use an environment variable for a single invocation of the nix tools:
+
+    ```ShellSession
+    $ export NIXPKGS_ALLOW_UNFREE=1
+    ```
+
+-   It is possible to permanently allow individual unfree packages, while still blocking unfree packages by default using the `allowUnfreePredicate` configuration option in the user configuration file.
+
+    This option is a function which accepts a package as a parameter and returns a boolean. The following example configuration accepts a package and always returns `false`, blocking all unfree packages:
+
+    ```nix
+    {
+      allowUnfreePredicate = (pkg: false);
+    }
+    ```
+
+    For a more useful example, try the following. This configuration only allows unfree packages named `roon-server` and `vscode` (Visual Studio Code):
+
+    ```nix
+    {
+      allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
+        "roon-server"
+        "vscode"
+      ];
+    }
+    ```
+
+-   It is also possible to allow and block licenses that are specifically acceptable or not acceptable, using `allowlistedLicenses` and `blocklistedLicenses`, respectively.
+
+    The following example configuration allowlists the licenses `amd` and `wtfpl`:
+
+    ```nix
+    {
+      allowlistedLicenses = with lib.licenses; [ amd wtfpl ];
+    }
+    ```
+
+    The following example configuration blocklists the `gpl3Only` and `agpl3Only` licenses:
+
+    ```nix
+    {
+      blocklistedLicenses = with lib.licenses; [ agpl3Only gpl3Only ];
+    }
+    ```
+
+    Note that `allowlistedLicenses` only applies to unfree licenses unless `allowUnfree` is enabled. It is not a generic allowlist for all types of licenses. `blocklistedLicenses` applies to all licenses.
+
+A complete list of licenses can be found in the file `lib/licenses.nix` of the nixpkgs tree.
+
+## Installing insecure packages {#sec-allow-insecure}
+
+There are several ways to tweak how Nix handles a package which has been marked as insecure.
+
+-   To temporarily allow all insecure packages, you can use an environment variable for a single invocation of the nix tools:
+
+    ```ShellSession
+    $ export NIXPKGS_ALLOW_INSECURE=1
+    ```
+
+-   It is possible to permanently allow individual insecure packages, while still blocking other insecure packages by default using the `permittedInsecurePackages` configuration option in the user configuration file.
+
+    The following example configuration permits the installation of the hypothetically insecure package `hello`, version `1.2.3`:
+
+    ```nix
+    {
+      permittedInsecurePackages = [
+        "hello-1.2.3"
+      ];
+    }
+    ```
+
+-   It is also possible to create a custom policy around which insecure packages to allow and deny, by overriding the `allowInsecurePredicate` configuration option.
+
+    The `allowInsecurePredicate` option is a function which accepts a package and returns a boolean, much like `allowUnfreePredicate`.
+
+    The following configuration example only allows insecure packages with very short names:
+
+    ```nix
+    {
+      allowInsecurePredicate = pkg: builtins.stringLength (lib.getName pkg) <= 5;
+    }
+    ```
+
+    Note that `permittedInsecurePackages` is only checked if `allowInsecurePredicate` is not specified.
+
+## Modify packages via `packageOverrides` {#sec-modify-via-packageOverrides}
+
+You can define a function called `packageOverrides` in your local `~/.config/nixpkgs/config.nix` to override Nix packages. It must be a function that takes `pkgs` as an argument and returns a modified set of packages.
+
+```nix
+{
+  packageOverrides = pkgs: rec {
+    foo = pkgs.foo.override { /* ... */ };
+  };
+}
+```
+
+## `config` Options Reference {#sec-config-options-reference}
+
+The following attributes can be passed in [`config`](#chap-packageconfig).
+
+```{=include=} options
+id-prefix: opt-
+list-id: configuration-variable-list
+source: ../config-options.json
+```
+
+
+## Declarative Package Management {#sec-declarative-package-management}
+
+### Build an environment {#sec-building-environment}
+
+Using `packageOverrides`, it is possible to manage packages declaratively. This means that we can list all of our desired packages within a declarative Nix expression. For example, to have `aspell`, `bc`, `ffmpeg`, `coreutils`, `gdb`, `nixUnstable`, `emscripten`, `jq`, `nox`, and `silver-searcher`, we could use the following in `~/.config/nixpkgs/config.nix`:
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      paths = [
+        aspell
+        bc
+        coreutils
+        gdb
+        ffmpeg
+        nixUnstable
+        emscripten
+        jq
+        nox
+        silver-searcher
+      ];
+    };
+  };
+}
+```
+
+To install it into our environment, you can just run `nix-env -iA nixpkgs.myPackages`. If you want the packages to be built from a working copy of `nixpkgs`, run `nix-env -f . -iA myPackages` instead. To explore what's been installed, look through `~/.nix-profile/`. You can see that a lot of stuff has been installed. Some of it is useful, some of it isn't. Let's tell Nixpkgs to only link the stuff that we want:
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      paths = [
+        aspell
+        bc
+        coreutils
+        gdb
+        ffmpeg
+        nixUnstable
+        emscripten
+        jq
+        nox
+        silver-searcher
+      ];
+      pathsToLink = [ "/share" "/bin" ];
+    };
+  };
+}
+```
+
+`pathsToLink` tells Nixpkgs to only link the paths listed, which gets rid of the extra stuff in the profile. `/bin` and `/share` are good defaults for a user environment, getting rid of the clutter. If you are running Nix on macOS, you may want to add another path as well, `/Applications`, which makes GUI apps available.
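+
+For instance, a minimal sketch of a macOS-oriented variant (the package list here is only illustrative) could also link `/Applications`:
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      # Illustrative subset of the packages from the example above.
+      paths = [ aspell bc coreutils ];
+      # "/Applications" makes GUI app bundles visible in the profile on macOS.
+      pathsToLink = [ "/share" "/bin" "/Applications" ];
+    };
+  };
+}
+```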
+
+### Getting documentation {#sec-getting-documentation}
+
+After building that new environment, look through `~/.nix-profile` to make sure everything is there that we wanted. Discerning readers will note that some files are missing. Look inside `~/.nix-profile/share/man/man1/` to verify this. There are no man pages for any of the Nix tools! This is because some packages, like Nix, have multiple outputs, with things like documentation placed in a separate output (see the chapter on multiple-output packages). Let's make Nix install those as well.
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; {
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      paths = [
+        aspell
+        bc
+        coreutils
+        ffmpeg
+        nixUnstable
+        emscripten
+        jq
+        nox
+        silver-searcher
+      ];
+      pathsToLink = [ "/share/man" "/share/doc" "/bin" ];
+      extraOutputsToInstall = [ "man" "doc" ];
+    };
+  };
+}
+```
+
+This provides us with some useful documentation for using our packages. However, if we actually want those man pages to be detected by `man`, we need to set up our environment. This can also be managed within Nix expressions.
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; rec {
+    myProfile = writeText "my-profile" ''
+      export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
+      export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
+    '';
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      paths = [
+        (runCommand "profile" {} ''
+          mkdir -p $out/etc/profile.d
+          cp ${myProfile} $out/etc/profile.d/my-profile.sh
+        '')
+        aspell
+        bc
+        coreutils
+        ffmpeg
+        man
+        nixUnstable
+        emscripten
+        jq
+        nox
+        silver-searcher
+      ];
+      pathsToLink = [ "/share/man" "/share/doc" "/bin" "/etc" ];
+      extraOutputsToInstall = [ "man" "doc" ];
+    };
+  };
+}
+```
+
+For this to work fully, you must also have this script sourced when you are logged in. Try adding something like this to your `~/.profile` file:
+
+```ShellSession
+#!/bin/sh
+if [ -d "${HOME}/.nix-profile/etc/profile.d" ]; then
+  for i in "${HOME}/.nix-profile/etc/profile.d/"*.sh; do
+    if [ -r "$i" ]; then
+      . "$i"
+    fi
+  done
+fi
+```
+
+Now just run `. "${HOME}/.profile"` and you can start loading man pages from your environment.
+
+### GNU info setup {#sec-gnu-info-setup}
+
+Configuring GNU info is a little bit trickier than man pages. To work correctly, info needs a database to be generated. This can be done with some small modifications to our environment scripts.
+
+```nix
+{
+  packageOverrides = pkgs: with pkgs; rec {
+    myProfile = writeText "my-profile" ''
+      export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
+      export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
+      export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
+    '';
+    myPackages = pkgs.buildEnv {
+      name = "my-packages";
+      paths = [
+        (runCommand "profile" {} ''
+          mkdir -p $out/etc/profile.d
+          cp ${myProfile} $out/etc/profile.d/my-profile.sh
+        '')
+        aspell
+        bc
+        coreutils
+        ffmpeg
+        man
+        nixUnstable
+        emscripten
+        jq
+        nox
+        silver-searcher
+        texinfoInteractive
+      ];
+      pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
+      extraOutputsToInstall = [ "man" "doc" "info" ];
+      postBuild = ''
+        if [ -x $out/bin/install-info -a -w $out/share/info ]; then
+          shopt -s nullglob
+          for i in $out/share/info/*.info $out/share/info/*.info.gz; do
+              $out/bin/install-info $i $out/share/info/dir
+          done
+        fi
+      '';
+    };
+  };
+}
+```
+
+`postBuild` tells Nixpkgs to run a command after building the environment. In this case, `install-info` adds the installed info pages to `dir`, which is GNU info's default root node. Note that `texinfoInteractive` is added to the environment to provide the `install-info` command.
diff --git a/nixpkgs/doc/using/overlays.chapter.md b/nixpkgs/doc/using/overlays.chapter.md
new file mode 100644
index 000000000000..46200730f0b2
--- /dev/null
+++ b/nixpkgs/doc/using/overlays.chapter.md
@@ -0,0 +1,167 @@
+# Overlays {#chap-overlays}
+
+This chapter describes how to extend and change Nixpkgs using overlays.  Overlays are used to add layers in the fixed-point used by Nixpkgs to compose the set of all packages.
+
+Nixpkgs can be configured with a list of overlays, which are applied in order. This means that the order of the overlays can be significant if multiple layers override the same package.
+
+## Installing overlays {#sec-overlays-install}
+
+The list of overlays can be set either explicitly in a Nix expression, or through `<nixpkgs-overlays>` or user configuration files.
+
+### Set overlays in NixOS or Nix expressions {#sec-overlays-argument}
+
+On a NixOS system the value of the `nixpkgs.overlays` option, if present, is passed to the system Nixpkgs directly as an argument. Note that this does not affect the overlays for non-NixOS operations (e.g.  `nix-env`), which are [looked up](#sec-overlays-lookup) independently.
+
+The list of overlays can be passed explicitly when importing nixpkgs, for example `import <nixpkgs> { overlays = [ overlay1 overlay2 ]; }`.
+
+NOTE: DO NOT USE THIS in nixpkgs. Further overlays can be added by calling the `pkgs.extend` or `pkgs.appendOverlays` functions, as sketched below, although it is often preferable to avoid them, because they recompute the Nixpkgs fixpoint, which is somewhat expensive to do.
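+
+As a rough sketch (the `myHello` alias is hypothetical), extending an already-imported package set looks like this:
+
+```nix
+let
+  pkgs = import <nixpkgs> { };
+  # `extend` re-evaluates the Nixpkgs fixpoint with one extra overlay,
+  # so avoid chaining many of these calls.
+  pkgs' = pkgs.extend (self: super: {
+    myHello = super.hello;
+  });
+in
+pkgs'.myHello
+```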
+
+### Install overlays via configuration lookup {#sec-overlays-lookup}
+
+The list of overlays is determined as follows.
+
+1.  First, if an [`overlays` argument](#sec-overlays-argument) to the Nixpkgs function itself is given, then that is used and no path lookup will be performed.
+
+2.  Otherwise, if the Nix path entry `<nixpkgs-overlays>` exists, we look for overlays at that path, as described below.
+
+    See the [section on `NIX_PATH`](https://nixos.org/manual/nix/stable/command-ref/env-common.html#env-NIX_PATH) in the Nix manual for more details on how to set a value for `<nixpkgs-overlays>`.
+
+3.  If one of `~/.config/nixpkgs/overlays.nix` and `~/.config/nixpkgs/overlays/` exists, then we look for overlays at that path, as described below. It is an error if both exist.
+
+If we are looking for overlays at a path, then there are two cases:
+
+-   If the path is a file, then the file is imported as a Nix expression and used as the list of overlays.
+
+-   If the path is a directory, then we take the content of the directory, order it lexicographically, and attempt to interpret each as an overlay by:
+
+    -   Importing the file, if it is a `.nix` file.
+
+    -   Importing a top-level `default.nix` file, if it is a directory.
+
+Because overlays that are set in NixOS configuration do not affect non-NixOS operations such as `nix-env`, the `overlays.nix` option provides a convenient way to use the same overlays for a NixOS system configuration and user configuration: the same file can be used as `overlays.nix` and imported as the value of `nixpkgs.overlays`.
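+
+A minimal sketch of that pattern, assuming the overlays file lives at the conventional per-user path for a user named `alice`:
+
+```nix
+{
+  # Reuses the same overlays list that nix-env picks up from
+  # ~/.config/nixpkgs/overlays.nix for this user.
+  nixpkgs.overlays = import /home/alice/.config/nixpkgs/overlays.nix;
+}
+```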
+
+## Defining overlays {#sec-overlays-definition}
+
+Overlays are Nix functions which accept two arguments, conventionally called `self` and `super`, and return a set of packages. For example, the following is a valid overlay.
+
+```nix
+self: super:
+
+{
+  boost = super.boost.override {
+    python = self.python3;
+  };
+  rr = super.callPackage ./pkgs/rr {
+    stdenv = self.stdenv_32bit;
+  };
+}
+```
+
+The first argument (`self`) corresponds to the final package set. You should use this set for the dependencies of all packages specified in your overlay. For example, all the dependencies of `rr` in the example above come from `self`, as well as the overridden dependencies used in the `boost` override.
+
+The second argument (`super`) corresponds to the result of the evaluation of the previous stages of Nixpkgs. It does not contain any of the packages added by the current overlay, nor any of the following overlays. This set should be used either to refer to packages you wish to override, or to access functions defined in Nixpkgs. For example, both the original recipe of `boost` in the above example and the `callPackage` function come from `super`.
+
+The value returned by this function should be a set similar to `pkgs/top-level/all-packages.nix`, containing overridden and/or new packages.
+
+Overlays are similar to other methods for customizing Nixpkgs, in particular the `packageOverrides` attribute described in [](#sec-modify-via-packageOverrides). Indeed, `packageOverrides` acts as an overlay with only the `super` argument. It is therefore appropriate for basic use, but overlays are more powerful and easier to distribute.
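+
+As a sketch (with a hypothetical `barSupport` flag), a `packageOverrides` entry of the form `packageOverrides = pkgs: { foo = pkgs.foo.override { barSupport = true; }; }` corresponds to the following overlay, which never uses `self`:
+
+```nix
+self: super: {
+  # Only `super` is consulted, mirroring the single `pkgs` argument
+  # that `packageOverrides` receives.
+  foo = super.foo.override { barSupport = true; };
+}
+```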
+
+## Using overlays to configure alternatives {#sec-overlays-alternatives}
+
+Certain software packages have different implementations of the same interface. Other distributions have functionality to switch between these. For example, Debian provides [DebianAlternatives](https://wiki.debian.org/DebianAlternatives).  Nixpkgs has what we call `alternatives`, which are configured through overlays.
+
+### BLAS/LAPACK {#sec-overlays-alternatives-blas-lapack}
+
+In Nixpkgs, we have multiple implementations of the BLAS/LAPACK numerical linear algebra interfaces. They are:
+
+-   [OpenBLAS](https://www.openblas.net/)
+
+    The Nixpkgs attribute is `openblas` for ILP64 (integer width = 64 bits) and `openblasCompat` for LP64 (integer width = 32 bits).  `openblasCompat` is the default.
+
+-   [LAPACK reference](https://www.netlib.org/lapack/) (also provides BLAS and CBLAS)
+
+    The Nixpkgs attribute is `lapack-reference`.
+
+-   [Intel MKL](https://software.intel.com/en-us/mkl) (only works on the x86_64 architecture, unfree)
+
+    The Nixpkgs attribute is `mkl`.
+
+-   [BLIS](https://github.com/flame/blis)
+
+    BLIS, available through the attribute `blis`, is a framework for linear algebra kernels. In addition, it implements the BLAS interface.
+
+-   [AMD BLIS/LIBFLAME](https://developer.amd.com/amd-aocl/blas-library/) (optimized for modern AMD x86_64 CPUs)
+
+    The AMD fork of the BLIS library, with attribute `amd-blis`, extends BLIS with optimizations for modern AMD CPUs. The changes are usually submitted to the upstream BLIS project after some time. However, AMD BLIS typically provides some performance improvements on AMD Zen CPUs. The complementary AMD LIBFLAME library, with attribute `amd-libflame`, provides a LAPACK implementation.
+
+Since [PR #83888](https://github.com/NixOS/nixpkgs/pull/83888), the `blas` and `lapack` packages can be overridden to use different implementations through the `blasProvider` and `lapackProvider` arguments. BLAS providers will have symlinks in `$out/lib/libblas.so.3` and `$out/lib/libcblas.so.3` to their respective BLAS libraries. Likewise, LAPACK providers will have symlinks in `$out/lib/liblapack.so.3` and `$out/lib/liblapacke.so.3` to their respective LAPACK libraries. For example, Intel MKL is both a BLAS and LAPACK provider. An overlay that uses Intel MKL looks like this:
+
+```nix
+self: super:
+
+{
+  blas = super.blas.override {
+    blasProvider = self.mkl;
+  };
+
+  lapack = super.lapack.override {
+    lapackProvider = self.mkl;
+  };
+}
+```
+
+This overlay uses Intel's MKL library for both the BLAS and LAPACK interfaces. Note that the same can be accomplished at runtime by putting a directory that contains compatible `libblas.so.3` and `liblapack.so.3` libraries on `LD_LIBRARY_PATH`. For instance:
+
+```ShellSession
+$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH nix-shell -p octave --run octave
+```
+
+Intel MKL requires an OpenMP implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends setting it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra does not build or distribute pre-compiled binaries that use it.
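+
+A sketch of that approach, assuming a hypothetical MKL-linked binary `./my-program` (the exact library path within the `llvmPackages.openmp` output may differ):
+
+```ShellSession
+$ LD_PRELOAD=$(nix-build '<nixpkgs>' -A llvmPackages.openmp)/lib/libomp.so ./my-program
+```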
+
+To override `blas` and `lapack` with their reference implementations (i.e. for development purposes), one can use the following overlay:
+
+```nix
+self: super:
+
+{
+  blas = super.blas.override {
+    blasProvider = self.lapack-reference;
+  };
+
+  lapack = super.lapack.override {
+    lapackProvider = self.lapack-reference;
+  };
+}
+```
+
+For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at a time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). The attributes `blas` and `lapack` are `LP64` by default. Their `ILP64` versions are provided through the attributes `blas-ilp64` and `lapack-ilp64`. Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
+
+```nix
+{ stdenv, blas, lapack, ... }:
+
+assert (!blas.isILP64) && (!lapack.isILP64);
+
+stdenv.mkDerivation {
+  # ...
+}
+```
+
+### Switching the MPI implementation {#sec-overlays-alternatives-mpi}
+
+All programs that are built with [MPI](https://en.wikipedia.org/wiki/Message_Passing_Interface) support use the generic attribute `mpi` as an input. At the moment, Nixpkgs natively provides three different MPI implementations:
+
+-   [Open MPI](https://www.open-mpi.org/) (default), attribute name `openmpi`
+
+-   [MPICH](https://www.mpich.org/), attribute name `mpich`
+
+-   [MVAPICH](https://mvapich.cse.ohio-state.edu/), attribute name `mvapich`
+
+To provide MPI-enabled applications that use MPICH instead of the default Open MPI, use the following overlay:
+
+```nix
+self: super:
+
+{
+  mpi = self.mpich;
+}
+```
diff --git a/nixpkgs/doc/using/overrides.chapter.md b/nixpkgs/doc/using/overrides.chapter.md
new file mode 100644
index 000000000000..8c6ed79076c2
--- /dev/null
+++ b/nixpkgs/doc/using/overrides.chapter.md
@@ -0,0 +1,134 @@
+# Overriding {#chap-overrides}
+
+Sometimes one wants to override parts of `nixpkgs`, e.g. derivation attributes or the results of derivations.
+
+These functions are used to make changes to packages, returning only single packages. [Overlays](#chap-overlays), on the other hand, can be used to combine the overridden packages across the entire package set of Nixpkgs.
+
+## &lt;pkg&gt;.override {#sec-pkg-override}
+
+The function `override` is usually available for all the derivations in the nixpkgs expression (`pkgs`).
+
+It is used to override the arguments passed to a function.
+
+Example usages:
+
+```nix
+pkgs.foo.override { arg1 = val1; arg2 = val2; /* ... */ }
+```
+
+It's also possible to access the previous arguments.
+
+```nix
+pkgs.foo.override (previous: { arg1 = previous.arg1; /* ... */ })
+```
+
+<!-- TODO: move below programlisting to a new section about extending and overlays and reference it -->
+
+```nix
+import pkgs.path {
+  overlays = [
+    (self: super: {
+      foo = super.foo.override { barSupport = true; };
+    })
+  ];
+}
+```
+
+```nix
+{
+  mypkg = pkgs.callPackage ./mypkg.nix {
+    mydep = pkgs.mydep.override { /* ... */ };
+  };
+}
+```
+
+In the first example, `pkgs.foo` is the result of a function call with some default arguments, usually a derivation. Using `pkgs.foo.override` will call the same function with the given new arguments.
+
+## &lt;pkg&gt;.overrideAttrs {#sec-pkg-overrideAttrs}
+
+The function `overrideAttrs` allows overriding the attribute set passed to a `stdenv.mkDerivation` call, producing a new derivation based on the original one. This function is available on all derivations produced by the `stdenv.mkDerivation` function, which is most packages in the nixpkgs expression `pkgs`.
+
+Example usages:
+
+```nix
+{
+  helloBar = pkgs.hello.overrideAttrs (finalAttrs: previousAttrs: {
+    pname = previousAttrs.pname + "-bar";
+  });
+}
+```
+
+In the above example, `"-bar"` is appended to the `pname` attribute, while all other attributes will be retained from the original `hello` package.
+
+The argument `previousAttrs` is conventionally used to refer to the attribute set originally passed to `stdenv.mkDerivation`.
+
+The argument `finalAttrs` refers to the final attributes passed to `mkDerivation`, plus the `finalPackage` attribute which is equal to the result of `mkDerivation` or subsequent `overrideAttrs` calls.
+
+If only a one-argument function is written, the argument has the meaning of `previousAttrs`.
+
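+For instance, a one-argument form that appends a hypothetical local patch only sees the previous attributes:
+
+```nix
+{
+  helloPatched = pkgs.hello.overrideAttrs (previousAttrs: {
+    # `previousAttrs.patches` may not be set on the original derivation.
+    patches = (previousAttrs.patches or [ ]) ++ [ ./hello-fix.patch ];
+  });
+}
+```
+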
+Function arguments can be omitted entirely if there is no need to access `previousAttrs` or `finalAttrs`.
+
+```nix
+{
+  helloWithDebug = pkgs.hello.overrideAttrs {
+    separateDebugInfo = true;
+  };
+}
+```
+
+In the above example, the `separateDebugInfo` attribute is overridden to be true, thus building debug info for `helloWithDebug`.
+
+::: {.note}
+Note that `separateDebugInfo` is processed only by the `stdenv.mkDerivation` function, not the generated, raw Nix derivation. Thus, using `overrideDerivation` will not work in this case, as it overrides only the attributes of the final derivation. For this reason, `overrideAttrs` should be preferred in (almost) all cases to `overrideDerivation`: it allows `stdenv.mkDerivation` to process the overridden input arguments, and it is easier to use, since you work with the same attribute names you see in your Nix code (e.g. `buildInputs` vs `nativeBuildInputs`) rather than the generated ones, and it involves less typing.
+:::
+
+## &lt;pkg&gt;.overrideDerivation {#sec-pkg-overrideDerivation}
+
+::: {.warning}
+You should prefer `overrideAttrs` in almost all cases, see its documentation for the reasons why. `overrideDerivation` is not deprecated and will continue to work, but is less nice to use and does not have as many abilities as `overrideAttrs`.
+:::
+
+::: {.warning}
+Do not use this function in Nixpkgs as it evaluates a derivation before modifying it, which breaks package abstraction. In addition, this evaluation-per-function application incurs a performance penalty, which can become a problem if many overrides are used. It is only intended for ad-hoc customisation, such as in `~/.config/nixpkgs/config.nix`.
+:::
+
+The function `overrideDerivation` creates a new derivation based on an existing one by overriding the original's attributes with the attribute set produced by the specified function. This function is available on all derivations defined using the `makeOverridable` function. Most standard derivation-producing functions, such as `stdenv.mkDerivation`, are defined using this function, which means most packages in the nixpkgs expression, `pkgs`, have this function.
+
+Example usage:
+
+```nix
+{
+  mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
+    name = "sed-4.2.2-pre";
+    src = fetchurl {
+      url = "ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2";
+      hash = "sha256-MxBJRcM2rYzQYwJ5XKxhXTQByvSg5jZc5cSHEZoB2IY=";
+    };
+    patches = [];
+  });
+}
+```
+
+In the above example, the `name`, `src`, and `patches` of the derivation will be overridden, while all other attributes will be retained from the original derivation.
+
+The argument `oldAttrs` is used to refer to the attribute set of the original derivation.
+
+::: {.note}
+A package's attributes are evaluated *before* being modified by the `overrideDerivation` function. For example, the `name` attribute reference in `url = "mirror://gnu/hello/${name}.tar.gz";` is filled-in *before* the `overrideDerivation` function modifies the attribute set. This means that overriding the `name` attribute, in this example, *will not* change the value of the `url` attribute. Instead, we need to override both the `name` *and* `url` attributes.
+:::
+
+## lib.makeOverridable {#sec-lib-makeOverridable}
+
+The function `lib.makeOverridable` is used to make the result of a function easily customizable. This utility only makes sense for functions that accept an argument set and return an attribute set.
+
+Example usage:
+
+```nix
+{
+  f = { a, b }: { result = a + b; };
+  c = lib.makeOverridable f { a = 1; b = 2; };
+}
+```
+
+The variable `c` is the value of the `f` function applied with some default arguments. Hence, in this example, the value of `c.result` is `3`.
+
+The variable `c` however also has some additional functions, like [c.override](#sec-pkg-override), which can be used to override the default arguments. In this example, the value of `(c.override { a = 4; }).result` is `6`.
diff --git a/nixpkgs/doc/using/platform-support.chapter.md b/nixpkgs/doc/using/platform-support.chapter.md
new file mode 100644
index 000000000000..3f91b3d5d980
--- /dev/null
+++ b/nixpkgs/doc/using/platform-support.chapter.md
@@ -0,0 +1,18 @@
+# Platform Support {#chap-platform-support}
+
+Packages receive varying degrees of support, both in terms of maintainer attention and available computation resources for continuous integration (CI).
+
+Below is the list of the best-supported platforms:
+
+- `x86_64-linux`: Highest level of support.
+- `aarch64-linux`: Well supported, with most packages building successfully in CI.
+- `aarch64-darwin`: Receives better support than `x86_64-darwin`.
+- `x86_64-darwin`: Receives some support.
+
+There are many other platforms with varying levels of support.
+The provisional platform list in [Appendix A] of [RFC046], while not up to date, can be used as guidance.
+
+A more formal definition of the platform support tiers is provided in [RFC046], but has not been fully implemented yet.
+
+[RFC046]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md
+[Appendix A]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md#appendix-a-non-normative-description-of-platforms-in-november-2019