Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package nix for openSUSE:Factory checked in 
at 2026-05-05 17:59:00
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/nix (Old)
 and      /work/SRC/openSUSE:Factory/.nix.new.30200 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "nix"

Tue May  5 17:59:00 2026 rev:18 rq:1350935 version:2.34.7

Changes:
--------
--- /work/SRC/openSUSE:Factory/nix/nix.changes  2026-04-13 23:19:06.080655241 
+0200
+++ /work/SRC/openSUSE:Factory/.nix.new.30200/nix.changes       2026-05-05 
17:59:02.036862969 +0200
@@ -1,0 +2,7 @@
+Mon May  4 20:14:28 UTC 2026 - Marcus Rueckert <[email protected]>
+
+- Update to version 2.34.7: [CVE-2026-44028, CVE-2026-44029]
+  see /usr/share/doc/packages/rl-2.34.md
+  
https://discourse.nixos.org/t/security-advisory-local-privilege-escalation-in-lix-and-nix/77407
+
+-------------------------------------------------------------------

Old:
----
  nix-2.34.6.tar.gz

New:
----
  nix-2.34.7.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ nix.spec ++++++
--- /var/tmp/diff_new_pack.HxwBMi/_old  2026-05-05 17:59:02.796894401 +0200
+++ /var/tmp/diff_new_pack.HxwBMi/_new  2026-05-05 17:59:02.796894401 +0200
@@ -26,7 +26,7 @@
 %endif
 
 Name:           nix
-Version:        2.34.6
+Version:        2.34.7
 Release:        0
 Summary:        The purely functional package manager
 License:        LGPL-2.1-only

++++++ nix-2.34.6.tar.gz -> nix-2.34.7.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/nix-2.34.6/.github/actions/install-nix-action/action.yaml 
new/nix-2.34.7/.github/actions/install-nix-action/action.yaml
--- old/nix-2.34.6/.github/actions/install-nix-action/action.yaml       
2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/.github/actions/install-nix-action/action.yaml       
2026-05-04 19:22:33.000000000 +0200
@@ -4,22 +4,12 @@
   dogfood:
     description: "Whether to use Nix installed from the latest artifact from 
master branch"
     required: true # Be explicit about the fact that we are using unreleased 
artifacts
-  experimental-installer:
-    description: "Whether to use the experimental installer to install Nix"
-    default: false
-  experimental-installer-version:
-    description: "Version of the experimental installer to use. If `latest`, 
the newest artifact from the default branch is used."
-    # TODO: This should probably be pinned to a release after 
https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one
-    default: "latest"
   extra_nix_config:
     description: "Gets appended to `/etc/nix/nix.conf` if passed."
   install_url:
     description: "URL of the Nix installer"
     required: false
-    default: "https://releases.nixos.org/nix/nix-2.32.1/install"
-  tarball_url:
-    description: "URL of the Nix tarball to use with the experimental 
installer"
-    required: false
+    default: "https://releases.nixos.org/nix/nix-2.34.6/install"
   github_token:
     description: "Github token"
     required: true
@@ -51,74 +41,14 @@
 
         gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n 
"$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR"
         echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> 
"$GITHUB_OUTPUT"
-        TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' 
-print | head -n 1)"
-        echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT"
 
         echo "::notice ::Dogfooding Nix installer from master 
(https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)"
       env:
         GH_TOKEN: ${{ inputs.github_token }}
         DOGFOOD_REPO: "NixOS/nix"
-    - name: "Gather system info for experimental installer"
-      shell: bash
-      if: ${{ inputs.experimental-installer == 'true' }}
-      run: |
-        echo "::notice Using experimental installer from 
$EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)"
-
-        if [ "$RUNNER_OS" == "Linux" ]; then
-          EXPERIMENTAL_INSTALLER_SYSTEM="linux"
-          echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" 
>> "$GITHUB_ENV"
-        elif [ "$RUNNER_OS" == "macOS" ]; then
-          EXPERIMENTAL_INSTALLER_SYSTEM="darwin"
-          echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" 
>> "$GITHUB_ENV"
-        else
-          echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS"
-          exit 1
-        fi
-
-        if [ "$RUNNER_ARCH" == "X64" ]; then
-          EXPERIMENTAL_INSTALLER_ARCH=x86_64
-          echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> 
"$GITHUB_ENV"
-        elif [ "$RUNNER_ARCH" == "ARM64" ]; then
-          EXPERIMENTAL_INSTALLER_ARCH=aarch64
-          echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> 
"$GITHUB_ENV"
-        else
-          echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH"
-          exit 1
-        fi
-
-        echo 
"EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM"
 >> "$GITHUB_ENV"
-      env:
-        EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
-    - name: "Download latest experimental installer"
-      shell: bash
-      id: download-latest-experimental-installer
-      if: ${{ inputs.experimental-installer == 'true' && 
inputs.experimental-installer-version == 'latest' }}
-      run: |
-        RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow 
ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId")
-
-        
EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT"
-        mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
-
-        gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n 
"$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
-        # Executable permissions are lost in artifacts
-        find $EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR -type f -exec chmod +x {} +
-        echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> 
"$GITHUB_OUTPUT"
-      env:
-        GH_TOKEN: ${{ inputs.github_token }}
-        EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
     - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e 
# v31.5.1
-      if: ${{ inputs.experimental-installer != 'true' }}
       with:
         # Ternary operator in GHA: 
https://www.github.com/actions/runner/issues/409#issuecomment-752775072
         install_url: ${{ inputs.dogfood == 'true' && format('{0}/install', 
steps.download-nix-installer.outputs.installer-path) || inputs.install_url }}
         install_options: ${{ inputs.dogfood == 'true' && 
format('--tarball-url-prefix {0}', 
steps.download-nix-installer.outputs.installer-path) || '' }}
         extra_nix_config: ${{ inputs.extra_nix_config }}
-    - uses: 
DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196
 # v20
-      if: ${{ inputs.experimental-installer == 'true' }}
-      with:
-        diagnostic-endpoint: ""
-        # TODO: It'd be nice to use `artifacts.nixos.org` for both of these, 
maybe through an `/experimental-installer/latest` endpoint? or `/commit/<hash>`?
-        local-root: ${{ inputs.experimental-installer-version == 'latest' && 
steps.download-latest-experimental-installer.outputs.installer-path || '' }}
-        source-url: ${{ inputs.experimental-installer-version != 'latest' && 
'https://artifacts.nixos.org/experimental-installer/tag/${{ 
inputs.experimental-installer-version }}/${{ 
env.EXPERIMENTAL_INSTALLER_ARTIFACT }}' || '' }}
-        nix-package-url: ${{ inputs.dogfood == 'true' && 
steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') 
}}
-        extra-conf: ${{ inputs.extra_nix_config }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/.github/workflows/ci.yml 
new/nix-2.34.7/.github/workflows/ci.yml
--- old/nix-2.34.6/.github/workflows/ci.yml     2026-04-11 18:54:54.000000000 
+0200
+++ new/nix-2.34.7/.github/workflows/ci.yml     2026-05-04 19:22:33.000000000 
+0200
@@ -11,7 +11,7 @@
       dogfood:
         description: 'Use dogfood Nix build'
         required: false
-        default: true
+        default: false
         type: boolean
 
 concurrency:
@@ -29,7 +29,7 @@
         fetch-depth: 0
     - uses: ./.github/actions/install-nix-action
       with:
-        dogfood: ${{ github.event_name == 'workflow_dispatch' && 
inputs.dogfood || github.event_name != 'workflow_dispatch' }}
+        dogfood: false
         extra_nix_config:
           experimental-features = nix-command flakes
         github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -43,7 +43,7 @@
       - uses: actions/checkout@v6
       - uses: ./.github/actions/install-nix-action
         with:
-          dogfood: ${{ github.event_name == 'workflow_dispatch' && 
inputs.dogfood || github.event_name != 'workflow_dispatch' }}
+          dogfood: false
           extra_nix_config: experimental-features = nix-command flakes
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - run: ./ci/gha/tests/pre-commit-checks
@@ -93,7 +93,7 @@
     - uses: ./.github/actions/install-nix-action
       with:
         github_token: ${{ secrets.GITHUB_TOKEN }}
-        dogfood: ${{ github.event_name == 'workflow_dispatch' && 
inputs.dogfood || github.event_name != 'workflow_dispatch' }}
+        dogfood: false
         # The sandbox would otherwise be disabled by default on Darwin
         extra_nix_config: "sandbox = true"
     # Since ubuntu 22.30, unprivileged usernamespaces are no longer allowed to 
map to the root user:
@@ -150,7 +150,7 @@
     - uses: ./.github/actions/install-nix-action
       with:
         github_token: ${{ secrets.GITHUB_TOKEN }}
-        dogfood: ${{ github.event_name == 'workflow_dispatch' && 
inputs.dogfood || github.event_name != 'workflow_dispatch' }}
+        dogfood: false
     - name: Run Windows unit tests
       run: |
         nix build --file ci/gha/tests/windows.nix unitTests.nix-util-tests -L
@@ -164,19 +164,19 @@
           - scenario: on ubuntu
             runs-on: ubuntu-24.04
             os: linux
-            experimental-installer: false
+            rust-installer: false
           - scenario: on macos
             runs-on: macos-14
             os: darwin
-            experimental-installer: false
-          - scenario: on ubuntu (experimental)
+            rust-installer: false
+          - scenario: on ubuntu (rust)
             runs-on: ubuntu-24.04
             os: linux
-            experimental-installer: true
-          - scenario: on macos (experimental)
+            rust-installer: true
+          - scenario: on macos (rust)
             runs-on: macos-14
             os: darwin
-            experimental-installer: true
+            rust-installer: true
     name: installer test ${{ matrix.scenario }}
     runs-on: ${{ matrix.runs-on }}
     steps:
@@ -188,22 +188,19 @@
         path: out
     - name: Looking up the installer tarball URL
       id: installer-tarball-url
-      run: |
-        echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT"
-        TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' 
-print | head -n 1)"
-        echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT"
+      run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> 
"$GITHUB_OUTPUT"
     - uses: cachix/install-nix-action@2126ae7fc54c9df00dd18f7f18754393182c73cd 
# v31.9.1
-      if: ${{ !matrix.experimental-installer }}
+      if: ${{ !matrix.rust-installer }}
       with:
         install_url: ${{ format('{0}/install', 
steps.installer-tarball-url.outputs.installer-url) }}
         install_options: ${{ format('--tarball-url-prefix {0}', 
steps.installer-tarball-url.outputs.installer-url) }}
-    - uses: ./.github/actions/install-nix-action
-      if: ${{ matrix.experimental-installer }}
-      with:
-        dogfood: false
-        experimental-installer: true
-        tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }}
-        github_token: ${{ secrets.GITHUB_TOKEN }}
+    - name: Run rust installer
+      if: ${{ matrix.rust-installer }}
+      run: |
+        chmod +x out/nix-installer
+        ./out/nix-installer install --no-confirm
+      env:
+        RUST_BACKTRACE: full
     - run: sudo apt install fish zsh
       if: matrix.os == 'linux'
     - run: brew install fish
@@ -261,7 +258,7 @@
     - uses: ./.github/actions/install-nix-action
       with:
         github_token: ${{ secrets.GITHUB_TOKEN }}
-        dogfood: ${{ github.event_name == 'workflow_dispatch' && 
inputs.dogfood || github.event_name != 'workflow_dispatch' }}
+        dogfood: false
         extra_nix_config: |
           experimental-features = flakes nix-command ca-derivations 
impure-derivations
           max-jobs = 1
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/.github/workflows/upload-release.yml 
new/nix-2.34.7/.github/workflows/upload-release.yml
--- old/nix-2.34.6/.github/workflows/upload-release.yml 2026-04-11 
18:54:54.000000000 +0200
+++ new/nix-2.34.7/.github/workflows/upload-release.yml 2026-05-04 
19:22:33.000000000 +0200
@@ -3,7 +3,7 @@
   workflow_dispatch:
     inputs:
       eval_id:
-        description: "Hydra evaluation ID"
+        description: "Hydra evaluation ID (from the maintenance-X.Y-release 
jobset)"
         required: true
         type: number
       is_latest:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/.version new/nix-2.34.7/.version
--- old/nix-2.34.6/.version     2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/.version     2026-05-04 19:22:33.000000000 +0200
@@ -1 +1 @@
-2.34.6
+2.34.7
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/nix-2.34.6/ci/gha/tests/prepare-installer-for-github-actions 
new/nix-2.34.7/ci/gha/tests/prepare-installer-for-github-actions
--- old/nix-2.34.6/ci/gha/tests/prepare-installer-for-github-actions    
2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/ci/gha/tests/prepare-installer-for-github-actions    
2026-05-04 19:22:33.000000000 +0200
@@ -2,10 +2,14 @@
 
 set -euo pipefail
 
-nix build -L ".#installerScriptForGHA" ".#binaryTarball"
+nix build -L \
+  ".#installerScriptForGHA" \
+  ".#binaryTarball" \
+  ".#rustInstaller"
 
 mkdir -p out
 cp ./result/install "out/install"
 name="$(basename "$(realpath ./result-1)")"
 # everything before the first dash
 cp -r ./result-1 "out/${name%%-*}"
+cp ./result-2/bin/nix-installer "out/nix-installer"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/flake.nix new/nix-2.34.7/flake.nix
--- old/nix-2.34.6/flake.nix    2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/flake.nix    2026-05-04 19:22:33.000000000 +0200
@@ -465,6 +465,9 @@
                 )
               )
             )
+        // lib.optionalAttrs (self.hydraJobs.rustInstaller ? ${system}) {
+          rustInstaller = self.hydraJobs.rustInstaller.${system};
+        }
         // lib.optionalAttrs (builtins.elem system linux64BitSystems) {
           dockerImage =
             let
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/maintainers/release-process.md 
new/nix-2.34.7/maintainers/release-process.md
--- old/nix-2.34.6/maintainers/release-process.md       2026-04-11 
18:54:54.000000000 +0200
+++ new/nix-2.34.7/maintainers/release-process.md       2026-05-04 
19:22:33.000000000 +0200
@@ -81,27 +81,33 @@
   $ git push --set-upstream origin $VERSION-maintenance
   ```
 
-* Create a jobset for the release branch on Hydra as follows:
+* Create two jobsets for the release branch on Hydra:
 
-  * Go to the jobset of the previous release
-  (e.g. https://hydra.nixos.org/jobset/nix/maintenance-2.11).
+  `maintenance-$VERSION` runs the full `hydraJobs` CI matrix.
+  `maintenance-$VERSION-release` builds only the artifacts consumed by
+  `upload-release`, so a release can be cut without waiting on the full
+  matrix. The `-release` suffix keeps the pair adjacent in Hydra's
+  alphabetical jobset list and lets scripts derive one name from the
+  other.
+
+  * Clone the previous `maintenance-*` jobset, set identifier
+    `maintenance-$VERSION`, description `$VERSION release branch`, flake
+    URL `github:NixOS/nix/$VERSION-maintenance`.
+
+  * Clone the previous `maintenance-*-release` jobset (or create a new
+    **legacy** jobset), set identifier `maintenance-$VERSION-release`,
+    description `$VERSION release artifacts`, Nix expression
+    `packaging/release-jobs.nix` in input `src`, and add input `src` of
+    type *Git checkout* pointing at
+    `https://github.com/NixOS/nix $VERSION-maintenance`.
+
+* Wait for the `maintenance-$VERSION-release` jobset to evaluate and
+  build. If impatient, go to the evaluation and select `Actions -> Bump
+  builds to front of queue`. The aggregate job `release` turns green
+  once every required artifact is available.
 
-  * Select `Actions -> Clone this jobset`.
-
-  * Set identifier to `maintenance-$VERSION`.
-
-  * Set description to `$VERSION release branch`.
-
-  * Set flake URL to `github:NixOS/nix/$VERSION-maintenance`.
-
-  * Hit `Create jobset`.
-
-* Wait for the new jobset to evaluate and build. If impatient, go to
-  the evaluation and select `Actions -> Bump builds to front of
-  queue`.
-
-* When the jobset evaluation has succeeded building, take note of the
-  evaluation ID (e.g. `1780832` in
+* When the release jobset evaluation has succeeded building, take note of
+  the evaluation ID (e.g. `1780832` in
   `https://hydra.nixos.org/eval/1780832`).
 
 * Tag the release:
@@ -174,8 +180,9 @@
   $ git push
   ```
 
-* Wait for the desired evaluation of the maintenance jobset to finish
-  building.
+* Wait for the desired evaluation of the `maintenance-XX.YY-release`
+  jobset to finish building (the `release` aggregate job is the gating
+  signal).
 
 * Tag the release
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/maintainers/upload-release.pl 
new/nix-2.34.7/maintainers/upload-release.pl
--- old/nix-2.34.6/maintainers/upload-release.pl        2026-04-11 
18:54:54.000000000 +0200
+++ new/nix-2.34.7/maintainers/upload-release.pl        2026-05-04 
19:22:33.000000000 +0200
@@ -64,7 +64,13 @@
 #print Dumper($evalInfo);
 my $flakeUrl = $evalInfo->{flake};
 my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die) if 
$flakeUrl;
-my $nixRev = ($flakeInfo ? $flakeInfo->{revision} : 
$evalInfo->{jobsetevalinputs}->{nix}->{revision}) or die;
+# Flake jobsets (`maintenance-X.Y`) expose the rev via the flake URL.
+# The release-artifacts jobset (`maintenance-X.Y-release`) is a legacy
+# jobset whose checkout is passed in as input `src`.
+my $nixRev = ($flakeInfo
+              ? $flakeInfo->{revision}
+              : $evalInfo->{jobsetevalinputs}->{src}->{revision}
+                // $evalInfo->{jobsetevalinputs}->{nix}->{revision}) or die;
 
 my $buildInfo = 
decode_json(fetch("$evalUrl/job/build.nix-everything.x86_64-linux", 
'application/json'));
 #print Dumper($buildInfo);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/packaging/hydra.nix 
new/nix-2.34.7/packaging/hydra.nix
--- old/nix-2.34.6/packaging/hydra.nix  2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/packaging/hydra.nix  2026-05-04 19:22:33.000000000 +0200
@@ -254,6 +254,32 @@
     }
   );
 
+  # `NixOS/nix-installer` with this revision's Nix closure embedded.
+  rustInstaller =
+    lib.genAttrs
+      (
+        linux64BitSystems
+        ++ [
+          "x86_64-darwin"
+          "aarch64-darwin"
+        ]
+      )
+      (
+        system:
+        let
+          pkgs = nixpkgsFor.${system}.native;
+          # Embed the native (glibc) Nix even though the Linux installer
+          # binary is static/musl.
+          tarball = pkgs.callPackage ./rust-installer/tarball.nix {
+            nix = pkgs.nixComponents2.nix-everything;
+          };
+          builder = if pkgs.stdenv.hostPlatform.isLinux then pkgs.pkgsStatic 
else pkgs;
+        in
+        builder.callPackage ./rust-installer {
+          inherit tarball;
+        }
+      );
+
   # docker image with Nix inside
   dockerImage = lib.genAttrs linux64BitSystems (system: 
self.packages.${system}.dockerImage);
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/packaging/release-jobs.nix 
new/nix-2.34.7/packaging/release-jobs.nix
--- old/nix-2.34.6/packaging/release-jobs.nix   1970-01-01 01:00:00.000000000 
+0100
+++ new/nix-2.34.7/packaging/release-jobs.nix   2026-05-04 19:22:33.000000000 
+0200
@@ -0,0 +1,81 @@
+# Hydra jobset containing only the artifacts consumed by
+# `maintainers/upload-release.pl`, so a release can be cut without
+# waiting on the full `hydraJobs` CI matrix.
+#
+# Evaluated as a legacy (non-flake) jobset because Hydra hard-codes flake
+# jobsets to `outputs.hydraJobs`; we re-enter the flake via
+# `builtins.getFlake` so derivations stay identical to the flake jobset
+# and share builds through the binary cache.
+#
+# Hydra jobset configuration:
+#   Identifier:      maintenance-<X.Y>-release
+#   Type:            Legacy
+#   Nix expression:  packaging/release-jobs.nix in input `src`
+#   Inputs:
+#     src  (Git checkout)  https://github.com/NixOS/nix <branch>
+{
+  src ? {
+    outPath = ./..;
+  },
+}:
+let
+  # Fetch by GitHub ref rather than the bare store path Hydra hands us,
+  # so `rev`/`lastModified` (and thus the version suffix) match the flake
+  # jobset and derivations are shared.
+  flake = builtins.getFlake (
+    if src ? rev then
+      "github:NixOS/nix/${src.rev}"
+    else
+      # Local evaluation / testing.
+      builtins.unsafeDiscardStringContext (toString src)
+  );
+  inherit (flake) hydraJobs;
+  inherit (flake.inputs.nixpkgs) lib;
+
+  jobs = {
+    # `nix-everything` per system: provides the store paths for
+    # `fallback-paths.nix` and (on x86_64-linux) the rendered manual via
+    # its `doc` output.
+    build.nix-everything = hydraJobs.build.nix-everything;
+    buildCross.nix-everything = {
+      # Only the cross targets that end up in `fallback-paths.nix`.
+      inherit (hydraJobs.buildCross.nix-everything)
+        riscv64-unknown-linux-gnu
+        ;
+    };
+
+    inherit (hydraJobs)
+      manual
+      binaryTarball
+      binaryTarballCross
+      installerScript
+      installerScriptForGHA
+      dockerImage
+      ;
+
+    # Aggregate gating job: green ⇒ every artifact the upload script
+    # needs is available.  `upload-release` can wait on this single job
+    # instead of the whole evaluation.  Constituents are referenced by
+    # job *name* so that an evaluation failure in one of them does not
+    # take down the aggregate's own evaluation.
+    release = 
flake.inputs.nixpkgs.legacyPackages.x86_64-linux.releaseTools.aggregate {
+      name = 
"nix-release-${flake.packages.x86_64-linux.nix-everything.version}";
+      meta.description = "Artifacts required for a Nix release";
+      constituents =
+        let
+          collectJobNames =
+            prefix: x:
+            if lib.isDerivation x then
+              [ prefix ]
+            else if lib.isAttrs x then
+              lib.concatLists (
+                lib.mapAttrsToList (n: collectJobNames (if prefix == "" then n 
else "${prefix}.${n}")) x
+              )
+            else
+              [ ];
+        in
+        collectJobNames "" (builtins.removeAttrs jobs [ "release" ]);
+    };
+  };
+in
+jobs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/packaging/rust-installer/default.nix 
new/nix-2.34.7/packaging/rust-installer/default.nix
--- old/nix-2.34.6/packaging/rust-installer/default.nix 1970-01-01 
01:00:00.000000000 +0100
+++ new/nix-2.34.7/packaging/rust-installer/default.nix 2026-05-04 
19:22:33.000000000 +0200
@@ -0,0 +1,88 @@
+# `NixOS/nix-installer` built with *this* Nix closure embedded, so
+# Hydra/CI can dogfood the Rust installer without the (removed)
+# `--nix-package-url` knob.
+{
+  lib,
+  stdenv,
+  buildPackages,
+  runCommand,
+  rustPlatform,
+  fetchFromGitHub,
+  tarball,
+}:
+
+let
+  installerVersion = "2.34.6";
+  src = fetchFromGitHub {
+    owner = "NixOS";
+    repo = "nix-installer";
+    tag = installerVersion;
+    hash = "sha256-aTaz8EtHexvke7tGr5MfeKy9g7AraIAFN+dPApm+fds=";
+  };
+
+  # Bare binary: no Nix closure yet.  Appended below via `pack`, so the
+  # (expensive) Rust compile is independent of the embedded Nix and
+  # stays cacheable across Nix revisions.
+  bare = rustPlatform.buildRustPackage {
+    pname = "nix-installer-bare";
+    version = installerVersion;
+
+    inherit src;
+
+    cargoHash = "sha256-/mNXkeZVuYsqd0TiUa7bzSP4xpKh0Fqga9EpasPbrzU=";
+
+    doCheck = false;
+
+    env = lib.optionalAttrs stdenv.hostPlatform.isDarwin {
+      # Drop the unused libiconv dylib the darwin stdenv injects; the
+      # binary must run before `/nix/store` exists.
+      NIX_LDFLAGS = "-dead_strip_dylibs";
+    };
+
+    postInstall = ''
+      install -m755 nix-installer.sh $out/bin/nix-installer.sh
+    '';
+  };
+in
+
+runCommand "nix-installer-${tarball.passthru.nixVersion}"
+  {
+    nativeBuildInputs = [
+      buildPackages.python3
+    ]
+    ++ lib.optionals stdenv.hostPlatform.isDarwin [
+      buildPackages.darwin.sigtool
+      buildPackages.darwin.cctools
+    ];
+
+    # The appended payload contains store-path strings on purpose; don't
+    # let the reference scanner pull the whole Nix closure into this
+    # derivation's runtime closure.
+    __structuredAttrs = true;
+    unsafeDiscardReferences.out = true;
+
+    passthru = { inherit bare; };
+
+    meta = {
+      description = "Rust-based Nix installer with an embedded Nix 
${tarball.passthru.nixVersion}";
+      homepage = "https://github.com/NixOS/nix-installer";
+      license = lib.licenses.lgpl21Only;
+      mainProgram = "nix-installer";
+    };
+  }
+  ''
+    mkdir -p $out/bin $out/nix-support
+
+    python3 ${src}/scripts/pack \
+      --input ${bare}/bin/nix-installer \
+      --tarball ${tarball}/nix.tar.zst \
+      --nix-store-path ${tarball.passthru.nixStorePath} \
+      --cacert-store-path ${tarball.passthru.cacertStorePath} \
+      --nix-version ${tarball.passthru.nixVersion} \
+      --output $out/bin/nix-installer
+
+    install -m755 ${bare}/bin/nix-installer.sh $out/bin/nix-installer.sh
+
+    echo "file binary-dist $out/bin/nix-installer" >> 
$out/nix-support/hydra-build-products
+    echo "file binary-dist $out/bin/nix-installer.sh" >> 
$out/nix-support/hydra-build-products
+  ''
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/packaging/rust-installer/tarball.nix 
new/nix-2.34.7/packaging/rust-installer/tarball.nix
--- old/nix-2.34.6/packaging/rust-installer/tarball.nix 1970-01-01 
01:00:00.000000000 +0100
+++ new/nix-2.34.7/packaging/rust-installer/tarball.nix 2026-05-04 
19:22:33.000000000 +0200
@@ -0,0 +1,50 @@
+# Zstd-compressed Nix closure in the layout expected by
+# `NixOS/nix-installer` (`include_bytes!` at build time).
+{
+  lib,
+  stdenv,
+  runCommand,
+  buildPackages,
+  zstd,
+  nix,
+  cacert,
+}:
+
+let
+  installerClosureInfo = buildPackages.closureInfo {
+    rootPaths = [
+      nix
+      cacert
+    ];
+  };
+in
+
+runCommand "nix-installer-tarball-${nix.version}"
+  {
+    nativeBuildInputs = [ zstd ];
+
+    passthru = {
+      nixStorePath = nix.outPath;
+      cacertStorePath = cacert.outPath;
+      nixVersion = nix.version;
+    };
+  }
+  ''
+    mkdir -p $out
+
+    dir=nix-${nix.version}-${stdenv.hostPlatform.system}
+
+    cp ${installerClosureInfo}/registration $TMPDIR/reginfo
+
+    tar cf - \
+      --sort=name \
+      --owner=0 --group=0 --mode=u+rw,uga+r \
+      --mtime='1970-01-01' \
+      --absolute-names \
+      --hard-dereference \
+      --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
+      --transform "s,$NIX_STORE,$dir/store,S" \
+      $TMPDIR/reginfo \
+      $(cat ${installerClosureInfo}/store-paths) \
+      | zstd -19 -T1 -o $out/nix.tar.zst
+  ''
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/nix-2.34.6/src/libcmd/include/nix/cmd/unix-socket-server.hh 
new/nix-2.34.7/src/libcmd/include/nix/cmd/unix-socket-server.hh
--- old/nix-2.34.6/src/libcmd/include/nix/cmd/unix-socket-server.hh     
2026-04-11 18:54:54.000000000 +0200
+++ new/nix-2.34.7/src/libcmd/include/nix/cmd/unix-socket-server.hh     
2026-05-04 19:22:33.000000000 +0200
@@ -55,6 +55,8 @@
     mode_t socketMode = 0666;
 };
 
+MakeError(AbortServeSocket, BaseError);
+
 /**
  * Run a server loop that accepts connections and calls the handler for each.
  *
@@ -70,6 +72,7 @@
  *
  * This function never returns normally. It runs until interrupted
  * (e.g., via SIGINT), at which point it throws `Interrupted`.
+ * Can be explicitly exited by throwing AbortServeSocket.
  *
  * @param options Configuration for the server.
  * @param handler Callback invoked for each accepted connection.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/src/libcmd/unix/unix-socket-server.cc 
new/nix-2.34.7/src/libcmd/unix/unix-socket-server.cc
--- old/nix-2.34.6/src/libcmd/unix/unix-socket-server.cc        2026-04-11 
18:54:54.000000000 +0200
+++ new/nix-2.34.7/src/libcmd/unix/unix-socket-server.cc        2026-05-04 
19:22:33.000000000 +0200
@@ -114,6 +114,9 @@
                 handler(std::move(remote), [&]() { listeningSockets.clear(); 
});
             }
 
+        } catch (AbortServeSocket &) {
+            /* Explicitly aborted, bail out. */
+            throw;
         } catch (Error & error) {
             auto ei = error.info();
             // FIXME: add to trace?
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/src/libutil/archive.cc 
new/nix-2.34.7/src/libutil/archive.cc
--- old/nix-2.34.6/src/libutil/archive.cc       2026-04-11 18:54:54.000000000 
+0200
+++ new/nix-2.34.7/src/libutil/archive.cc       2026-05-04 19:22:33.000000000 
+0200
@@ -32,6 +32,12 @@
 
 static GlobalConfig::Register rArchiveSettings(&archiveSettings);
 
+/* Maximum directory nesting depth for dumpPath()/parseDump(). Bounds
+   stack usage so deep trees cannot overflow the (possibly coroutine)
+   stack these run on. Chosen to fit comfortably in the default 128 KiB
+   boost coroutine stack. */
+static constexpr size_t narMaxDepth = 64;
+
 PathFilter defaultPathFilter = [](const std::string &) { return true; };
 
 void SourceAccessor::dumpPath(const CanonPath & path, Sink & sink, PathFilter 
& filter)
@@ -49,9 +55,12 @@
 
     sink << narVersionMagic1;
 
-    [&, &this_(*this)](this const auto & dump, const CanonPath & path) -> void 
{
+    [&, &this_(*this)](this const auto & dump, const CanonPath & path, size_t 
depth) -> void {
         checkInterrupt();
 
+        if (depth >= narMaxDepth)
+            throw Error("path '%s' exceeds maximum NAR directory depth of %d", 
this_.showPath(path), narMaxDepth);
+
         auto st = this_.lstat(path);
 
         sink << "(";
@@ -86,7 +95,7 @@
             for (auto & i : unhacked)
                 if (filter((path / i.first).abs())) {
                     sink << "entry" << "(" << "name" << i.first << "node";
-                    dump(path / i.second);
+                    dump(path / i.second, depth + 1);
                     sink << ")";
                 }
         }
@@ -98,7 +107,7 @@
             throw Error("file '%s' has an unsupported type", path);
 
         sink << ")";
-    }(path);
+    }(path, 0);
 }
 
 time_t dumpPathAndGetMtime(const std::filesystem::path & path, Sink & sink, 
PathFilter & filter)
@@ -159,35 +168,47 @@
     }
 };
 
-static void parse(FileSystemObjectSink & sink, Source & source, const 
CanonPath & path)
+static void parse(FileSystemObjectSink & sink, Source & source, const 
CanonPath & path, size_t depth)
 {
-    auto getString = [&]() {
+    if (depth >= narMaxDepth)
+        throw badArchive("NAR directory nesting exceeds maximum depth of %d", 
narMaxDepth);
+
+    /* NAR keywords are all <= 10 bytes; a little slack keeps error
+       messages useful for short garbage without allowing large
+       allocations. */
+    constexpr size_t narMaxTag = 32;
+    /* Format-defined bounds, intentionally independent of host
+       NAME_MAX/PATH_MAX. */
+    constexpr size_t narMaxName = 255;
+    constexpr size_t narMaxTarget = 4095;
+
+    auto getString = [&](size_t max) {
         checkInterrupt();
-        return readString(source);
+        return readString(source, max);
     };
 
     auto expectTag = [&](std::string_view expected) {
-        auto tag = getString();
+        auto tag = getString(narMaxTag);
         if (tag != expected)
-            throw badArchive("expected tag '%s', got '%s'", expected, 
tag.substr(0, 1024));
+            throw badArchive("expected tag '%s', got '%s'", expected, tag);
     };
 
     expectTag("(");
 
     expectTag("type");
 
-    auto type = getString();
+    auto type = getString(narMaxTag);
 
     if (type == "regular") {
         sink.createRegularFile(path, [&](auto & crf) {
-            auto tag = getString();
+            auto tag = getString(narMaxTag);
 
             if (tag == "executable") {
-                auto s2 = getString();
+                auto s2 = getString(0);
                 if (s2 != "")
                     throw badArchive("executable marker has non-empty value");
                 crf.isExecutable();
-                tag = getString();
+                tag = getString(narMaxTag);
             }
 
             if (tag != "contents")
@@ -206,7 +227,7 @@
             std::string prevName;
 
             while (1) {
-                auto tag = getString();
+                auto tag = getString(narMaxTag);
 
                 if (tag == ")")
                     break;
@@ -218,7 +239,7 @@
 
                 expectTag("name");
 
-                auto name = getString();
+                auto name = getString(narMaxName);
                 if (name.empty() || name == "." || name == ".." || 
name.find('/') != std::string::npos
                     || name.find((char) 0) != std::string::npos)
                     throw badArchive("NAR contains invalid file name '%1%'", 
name);
@@ -243,7 +264,7 @@
 
                 expectTag("node");
 
-                parse(dirSink, source, relDirPath / name);
+                parse(dirSink, source, relDirPath / name, depth + 1);
 
                 expectTag(")");
             }
@@ -253,7 +274,9 @@
     else if (type == "symlink") {
         expectTag("target");
 
-        auto target = getString();
+        auto target = getString(narMaxTarget);
+        if (target.empty() || target.find((char) 0) != std::string::npos)
+            throw badArchive("NAR contains invalid symlink target");
         sink.createSymlink(path, target);
 
         expectTag(")");
@@ -274,7 +297,7 @@
     }
     if (version != narVersionMagic1)
         throw badArchive("input doesn't look like a Nix archive");
-    parse(sink, source, CanonPath::root);
+    parse(sink, source, CanonPath::root, 0);
 }
 
 void restorePath(const std::filesystem::path & path, Source & source, bool 
startFsync)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/src/libutil/serialise.cc 
new/nix-2.34.7/src/libutil/serialise.cc
--- old/nix-2.34.6/src/libutil/serialise.cc     2026-04-11 18:54:54.000000000 
+0200
+++ new/nix-2.34.7/src/libutil/serialise.cc     2026-05-04 19:22:33.000000000 
+0200
@@ -10,6 +10,7 @@
 #include <memory>
 
 #include <boost/coroutine2/coroutine.hpp>
+#include <boost/coroutine2/protected_fixedsize_stack.hpp>
 
 #ifdef _WIN32
 #  include <fileapi.h>
@@ -327,20 +328,21 @@
             cur = in;
 
             if (!coro) {
-                coro = coro_t::push_type([&](coro_t::pull_type & yield) {
-                    LambdaSource source([&](char * out, size_t out_len) {
-                        if (cur.empty()) {
-                            yield();
-                            if (yield.get())
-                                throw EndOfFile("coroutine has finished");
-                        }
-
-                        size_t n = cur.copy(out, out_len);
-                        cur.remove_prefix(n);
-                        return n;
+                coro =
+                    
coro_t::push_type(boost::coroutines2::protected_fixedsize_stack(), 
[&](coro_t::pull_type & yield) {
+                        LambdaSource source([&](char * out, size_t out_len) {
+                            if (cur.empty()) {
+                                yield();
+                                if (yield.get())
+                                    throw EndOfFile("coroutine has finished");
+                            }
+
+                            size_t n = cur.copy(out, out_len);
+                            cur.remove_prefix(n);
+                            return n;
+                        });
+                        reader(source);
                     });
-                    reader(source);
-                });
             }
 
             if (!*coro) {
@@ -384,14 +386,15 @@
         {
             bool hasCoro = coro.has_value();
             if (!hasCoro) {
-                coro = coro_t::pull_type([&](coro_t::push_type & yield) {
-                    LambdaSink sink([&](std::string_view data) {
-                        if (!data.empty()) {
-                            yield(data);
-                        }
+                coro =
+                    
coro_t::pull_type(boost::coroutines2::protected_fixedsize_stack(), 
[&](coro_t::push_type & yield) {
+                        LambdaSink sink([&](std::string_view data) {
+                            if (!data.empty()) {
+                                yield(data);
+                            }
+                        });
+                        writer(sink);
                     });
-                    writer(sink);
-                });
             }
 
             if (cur.empty()) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/src/libutil/tarfile.cc 
new/nix-2.34.7/src/libutil/tarfile.cc
--- old/nix-2.34.6/src/libutil/tarfile.cc       2026-04-11 18:54:54.000000000 
+0200
+++ new/nix-2.34.7/src/libutil/tarfile.cc       2026-05-04 19:22:33.000000000 
+0200
@@ -5,6 +5,7 @@
 #include "nix/util/serialise.hh"
 #include "nix/util/tarfile.hh"
 #include "nix/util/file-system.hh"
+#include "nix/util/os-string.hh"
 
 namespace nix {
 
@@ -123,6 +124,12 @@
         archive_read_free(this->archive);
 }
 
+#ifndef _WIN32
+#  define NIX_LIBARCHIVE_NATIVE_PATH_FUNC(func) func
+#else
+#  define NIX_LIBARCHIVE_NATIVE_PATH_FUNC(func) func##_w
+#endif
+
 static void extract_archive(TarArchive & archive, const std::filesystem::path 
& destDir)
 {
     int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | 
ARCHIVE_EXTRACT_SECURE_NODOTDOT;
@@ -132,24 +139,34 @@
         int r = archive_read_next_header(archive.archive, &entry);
         if (r == ARCHIVE_EOF)
             break;
-        auto name = archive_entry_pathname(entry);
-        if (!name)
-            throw Error("cannot get archive member name: %s", 
archive_error_string(archive.archive));
-        if (r == ARCHIVE_WARN)
-            warn("getting archive member '%1%': %2%", name, 
archive_error_string(archive.archive));
-        else
-            archive.check(r);
 
-        archive_entry_copy_pathname(entry, (destDir / name).string().c_str());
+        const auto relPath = [&]() -> std::filesystem::path {
+            /* Some archives might lack a pathname 
https://github.com/libarchive/libarchive/issues/2089. */
+            auto * name = 
NIX_LIBARCHIVE_NATIVE_PATH_FUNC(archive_entry_pathname)(entry);
+            if (!name)
+                throw Error("cannot get archive member name: %s", 
archive_error_string(archive.archive));
+            if (r == ARCHIVE_WARN)
+                warn(
+                    "getting archive member '%1%': %2%",
+                    os_string_to_string(OsStringView(name)),
+                    archive_error_string(archive.archive));
+            else
+                archive.check(r);
+
+            return std::filesystem::path(name).relative_path();
+        }();
+
+        NIX_LIBARCHIVE_NATIVE_PATH_FUNC(archive_entry_copy_pathname)(entry, 
(destDir / relPath).c_str());
 
         // sources can and do contain dirs with no rx bits
         if (archive_entry_filetype(entry) == AE_IFDIR && 
(archive_entry_mode(entry) & 0500) != 0500)
             archive_entry_set_mode(entry, archive_entry_mode(entry) | 0500);
 
         // Patch hardlink path
-        const char * original_hardlink = archive_entry_hardlink(entry);
-        if (original_hardlink) {
-            archive_entry_copy_hardlink(entry, (destDir / 
original_hardlink).string().c_str());
+        const auto * originalHardlink = 
NIX_LIBARCHIVE_NATIVE_PATH_FUNC(archive_entry_hardlink)(entry);
+        if (originalHardlink) {
+            auto hardlinkPath = 
std::filesystem::path(originalHardlink).relative_path();
+            
NIX_LIBARCHIVE_NATIVE_PATH_FUNC(archive_entry_copy_hardlink)(entry, (destDir / 
hardlinkPath).c_str());
         }
 
         archive.check(archive_read_extract(archive.archive, entry, flags));
@@ -158,6 +175,8 @@
     archive.close();
 }
 
+#undef NIX_LIBARCHIVE_NATIVE_PATH_FUNC
+
 void unpackTarfile(const std::filesystem::path & tarFile, const 
std::filesystem::path & destDir)
 {
     auto archive = TarArchive(tarFile);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/nix-2.34.6/src/nix/unix/daemon.cc 
new/nix-2.34.7/src/nix/unix/daemon.cc
--- old/nix-2.34.6/src/nix/unix/daemon.cc       2026-04-11 18:54:54.000000000 
+0200
+++ new/nix-2.34.7/src/nix/unix/daemon.cc       2026-05-04 19:22:33.000000000 
+0200
@@ -120,13 +120,41 @@
 }
 #endif
 
+/* Check for anything that might be a crash. Too many crashes aren't
+   supposed to happen and we should limit the amount if someone is
+   intentionally triggering those as an ASLR bypass attempt (each forked
+   daemon worker has the same address space layout as we do). TODO: Ideally
+   we'd re-exec the daemon worker so that it gets a fresh address space
+   for each connection. Alternatively, we could make the daemon socket use
+   Accept=yes systemd.socket(5). */
+std::atomic<unsigned> crashCount = 0;
+
+/* Sanity check that using the atomic counter is fine in the signal handler. */
+static_assert(crashCount.is_always_lock_free);
+
+/* For now we are just limiting the number of crashes experienced by this
+   daemon instance. systemd (e.g.) would restart us, which would get us
+   a fresh address space layout - which is exactly what we want in case
+   someone is intentionally crashing the daemon to brute-force ASLR. */
+static constexpr unsigned crashLimit = 64;
+
 static void sigChldHandler(int sigNo)
 {
     // Ensure we don't modify errno of whatever we've interrupted
     auto saved_errno = errno;
     //  Reap all dead children.
-    while (waitpid(-1, 0, WNOHANG) > 0)
-        ;
+    int status;
+    while (waitpid(-1, &status, WNOHANG) > 0) {
+        if (!WIFSIGNALED(status))
+            continue;
+        int sig = WTERMSIG(status);
+        for (auto i : {SIGILL, SIGSEGV, SIGBUS, SIGABRT, SIGSYS, SIGFPE}) {
+            if (sig == i) {
+                ++crashCount;
+                break;
+            }
+        }
+    }
     errno = saved_errno;
 }
 
@@ -274,6 +302,9 @@
                 .socketMode = 0666,
             },
             [&](AutoCloseFD remote, std::function<void()> closeListeners) {
+                if (crashCount >= crashLimit)
+                    throw unix::AbortServeSocket("too many daemon worker 
crashes (%1%)", crashLimit);
+
                 unix::closeOnExec(remote.get());
 
                 unix::PeerInfo peer;

Reply via email to