Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package rqlite for openSUSE:Factory checked in at 2026-01-05 14:51:32
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/rqlite (Old)
 and      /work/SRC/openSUSE:Factory/.rqlite.new.1928 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "rqlite"

Mon Jan  5 14:51:32 2026 rev:37 rq:1325249 version:9.3.9

Changes:
--------
--- /work/SRC/openSUSE:Factory/rqlite/rqlite.changes    2025-12-24 13:17:55.414453719 +0100
+++ /work/SRC/openSUSE:Factory/.rqlite.new.1928/rqlite.changes  2026-01-05 14:52:18.544152372 +0100
@@ -1,0 +2,19 @@
+Sun Jan  4 09:09:54 UTC 2026 - Andreas Stieger <[email protected]>
+
+- Update to version 9.3.9:
+  * Handle possible WAL checkpoint failure
+  * Add QueryWithContext() to DB layer
+  * Unit test forced checkpoint and truncation at Store level
+  * Improve MSRW Error structure
+  * Snapshot Sink sets owner when taking MSRW
+  * Log cancelled Snapshot Store LockingSinks
+  * Upgrade dependencies
+
+-------------------------------------------------------------------
+Sat Jan  3 15:50:05 UTC 2026 - Andreas Stieger <[email protected]>
+
+- Update to version 9.3.7:
+  * Improve FSM Snapshot logging
+  * Upgrade SQLite
+
+-------------------------------------------------------------------

Old:
----
  rqlite-9.3.6.tar.xz

New:
----
  rqlite-9.3.9.tar.xz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ rqlite.spec ++++++
--- /var/tmp/diff_new_pack.YHC0bk/_old  2026-01-05 14:52:20.824247334 +0100
+++ /var/tmp/diff_new_pack.YHC0bk/_new  2026-01-05 14:52:20.828247500 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package rqlite
 #
-# Copyright (c) 2025 Andreas Stieger <[email protected]>
+# Copyright (c) 2026 Andreas Stieger <[email protected]>
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -17,7 +17,7 @@
 
 
 Name:           rqlite
-Version:        9.3.6
+Version:        9.3.9
 Release:        0
 Summary:        Distributed relational database built on SQLite
 License:        MIT

++++++ _service ++++++
--- /var/tmp/diff_new_pack.YHC0bk/_old  2026-01-05 14:52:20.876249500 +0100
+++ /var/tmp/diff_new_pack.YHC0bk/_new  2026-01-05 14:52:20.884249833 +0100
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/rqlite/rqlite.git</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">v9.3.6</param>
+    <param name="revision">v9.3.9</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="changesgenerate">enable</param>
     <param name="versionrewrite-pattern">v(.*)</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.YHC0bk/_old  2026-01-05 14:52:20.912251000 +0100
+++ /var/tmp/diff_new_pack.YHC0bk/_new  2026-01-05 14:52:20.920251332 +0100
@@ -1,7 +1,7 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/rqlite/rqlite.git</param>
-    <param name="changesrevision">f3c66dce6525932b7a807763feff03a9af86d371</param>
+    <param name="changesrevision">3c0f61cc18e55f070246a451084a9fa49563c777</param>
   </service>
 </servicedata>
 (No newline at EOF)

++++++ rqlite-9.3.6.tar.xz -> rqlite-9.3.9.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/CHANGELOG.md new/rqlite-9.3.9/CHANGELOG.md
--- old/rqlite-9.3.6/CHANGELOG.md       2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/CHANGELOG.md       2026-01-04 08:09:34.000000000 +0100
@@ -1,3 +1,21 @@
+## v9.3.9 (January 4th 2026)
+### Implementation changes and bug fixes
+- [PR #2423](https://github.com/rqlite/rqlite/pull/2423): Handle possible WAL checkpoint failure.
+- [PR #2424](https://github.com/rqlite/rqlite/pull/2424): Add `QueryWithContext()` to DB layer.
+- [PR #2425](https://github.com/rqlite/rqlite/pull/2425): Unit test forced checkpoint and truncation at Store level.
+
+## v9.3.8 (January 3rd 2026)
+### Implementation changes and bug fixes
+- [PR #2419](https://github.com/rqlite/rqlite/pull/2419): Improve MSRW Error structure.
+- [PR #2420](https://github.com/rqlite/rqlite/pull/2420): Snapshot Sink sets owner when taking MSRW.
+- [PR #2421](https://github.com/rqlite/rqlite/pull/2421): Log cancelled Snapshot Store LockingSinks.
+- [PR #2422](https://github.com/rqlite/rqlite/pull/2422): Upgrade dependencies.
+
+## v9.3.7 (January 2nd 2026)
+### Implementation changes and bug fixes
+- [PR #2415](https://github.com/rqlite/rqlite/pull/2415): Upgrade SQLite to 3.51.1.
+- [PR #2416](https://github.com/rqlite/rqlite/pull/2416): Improve FSM Snapshot logging.
+
 ## v9.3.6 (December 23rd 2025)
 ### Implementation changes and bug fixes
 - [PR #2404](https://github.com/rqlite/rqlite/pull/2404), [PR #2408](https://github.com/rqlite/rqlite/pull/2408): Add basic end-to-end testing of the rqlite shell.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/Dockerfile new/rqlite-9.3.9/Dockerfile
--- old/rqlite-9.3.6/Dockerfile 2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/Dockerfile 2026-01-04 08:09:34.000000000 +0100
@@ -11,6 +11,7 @@
     gettext \
     git \
     icu-dev \
+    jq \
     linux-headers \
     make \
     musl-dev \
@@ -35,17 +36,26 @@
 WORKDIR /extensions
 WORKDIR /app
 
-RUN curl -L `curl -s https://api.github.com/repos/nalgeon/sqlean/releases/latest | grep "tarball_url" | cut -d '"' -f 4` -o sqlean.tar.gz && \
-    tar xvfz sqlean.tar.gz && \
+RUN set -e; \
+    sqlean_url=$(curl -s https://api.github.com/repos/nalgeon/sqlean/releases/latest | jq -r .tarball_url); \
+    echo "Downloading sqlean from: $sqlean_url"; \
+    curl -L "$sqlean_url" -o sqlean.tar.gz
+RUN tar xvfz sqlean.tar.gz && \
     cd nalgeon* && make prepare-dist download-sqlite download-external compile-linux && zip -j /extensions/sqlean.zip dist/sqlean.so
 
-RUN curl -L `curl -s https://api.github.com/repos/asg017/sqlite-vec/releases/latest | grep "tarball_url" | cut -d '"' -f 4` -o sqlite-vec.tar.gz && \
-    tar xvfz sqlite-vec.tar.gz && \
+RUN set -e; \
+    sqlitevec_url=$(curl -s https://api.github.com/repos/asg017/sqlite-vec/releases/latest | jq -r .tarball_url); \
+    echo "Downloading sqlite-vec from: $sqlitevec_url"; \
+    curl -L "$sqlitevec_url" -o sqlite-vec.tar.gz
+RUN tar xvfz sqlite-vec.tar.gz && \
     echo location >> ~/.curlrc && \
     cd asg017* && sh scripts/vendor.sh && echo "#include <sys/types.h>" | cat - sqlite-vec.c > temp && mv temp sqlite-vec.c && make loadable && zip -j /extensions/sqlite-vec.zip dist/vec0.so
 
-RUN curl -L `curl -s https://api.github.com/repos/sqliteai/sqlite-vector/releases/latest | grep "tarball_url" | cut -d '"' -f 4` -o sqliteai-vector.tar.gz && \
-    tar xvfz sqliteai-vector.tar.gz && rm sqliteai-vector.tar.gz && \
+RUN set -e; \
+    sqliteai_vector=$(curl -s https://api.github.com/repos/sqliteai/sqlite-vector/releases/latest | jq -r .tarball_url); \
+    echo "Downloading sqliteai-vector from: $sqliteai_vector"; \
+    curl -L "$sqliteai_vector" -o sqliteai-vector.tar.gz
+RUN tar xvfz sqliteai-vector.tar.gz && rm sqliteai-vector.tar.gz && \
     cd sqliteai* && make && zip -j /extensions/sqliteai-vector.zip dist/vector.so
 
 RUN git clone https://github.com/rqlite/rqlite-sqlite-ext.git
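
The Dockerfile change above replaces fragile grep/cut scraping of the GitHub releases API with jq, which parses the JSON response properly. For illustration only, here is a minimal Go sketch of the same idea; the helper latestTarballURL and its error handling are hypothetical, not part of rqlite:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// release models the single field needed from the GitHub releases API.
type release struct {
    TarballURL string `json:"tarball_url"`
}

// latestTarballURL is a hypothetical helper: it fetches the latest
// release of a repository and returns its source tarball URL.
func latestTarballURL(repo string) (string, error) {
    resp, err := http.Get("https://api.github.com/repos/" + repo + "/releases/latest")
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return "", fmt.Errorf("unexpected status: %s", resp.Status)
    }
    var rel release
    if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil {
        return "", err
    }
    return rel.TarballURL, nil
}

func main() {
    url, err := latestTarballURL("nalgeon/sqlean")
    if err != nil {
        panic(err)
    }
    fmt.Println(url)
}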
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/LICENSE new/rqlite-9.3.9/LICENSE
--- old/rqlite-9.3.6/LICENSE    2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/LICENSE    2026-01-04 08:09:34.000000000 +0100
@@ -1,6 +1,6 @@
 The MIT License (MIT)
  
-Copyright (c) Philip O'Toole (https://www.philipotoole.com) 2015 - 2025
+Copyright (c) Philip O'Toole (https://www.philipotoole.com) 2015 - 2026
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/README.md new/rqlite-9.3.9/README.md
--- old/rqlite-9.3.6/README.md  2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/README.md  2026-01-04 08:09:34.000000000 +0100
@@ -5,7 +5,6 @@
 </picture>
 
 [![Circle CI](https://circleci.com/gh/rqlite/rqlite/tree/master.svg?style=svg)](https://circleci.com/gh/rqlite/rqlite/tree/master)
-[![AppVeyor](https://ci.appveyor.com/api/projects/status/github/rqlite/rqlite?branch=master&svg=true)](https://ci.appveyor.com/project/otoolep/rqlite)
 [![Docker](https://img.shields.io/docker/pulls/rqlite/rqlite?style=plastic)](https://hub.docker.com/r/rqlite/rqlite/)
 [![Office Hours](https://img.shields.io/badge/Office%20Hours--yellow.svg)](https://rqlite.io/office-hours)
 [![Slack](https://img.shields.io/badge/Slack--purple.svg)](https://www.rqlite.io/join-slack)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/cluster/proto/message.pb.go new/rqlite-9.3.9/cluster/proto/message.pb.go
--- old/rqlite-9.3.6/cluster/proto/message.pb.go        2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/cluster/proto/message.pb.go        2026-01-04 08:09:34.000000000 +0100
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-//     protoc-gen-go v1.36.10
+//     protoc-gen-go v1.36.11
 //     protoc        v6.33.0
 // source: message.proto
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/command/proto/command.pb.go new/rqlite-9.3.9/command/proto/command.pb.go
--- old/rqlite-9.3.6/command/proto/command.pb.go        2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/command/proto/command.pb.go        2026-01-04 08:09:34.000000000 +0100
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-//     protoc-gen-go v1.36.10
+//     protoc-gen-go v1.36.11
 //     protoc        v6.33.0
 // source: command.proto
 
@@ -439,6 +439,7 @@
        Sql           string                 `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"`
        Parameters    []*Parameter           `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty"`
        ForceQuery    bool                   `protobuf:"varint,3,opt,name=forceQuery,proto3" json:"forceQuery,omitempty"`
+       ForceStall    bool                   `protobuf:"varint,4,opt,name=forceStall,proto3" json:"forceStall,omitempty"`
        unknownFields protoimpl.UnknownFields
        sizeCache     protoimpl.SizeCache
 }
@@ -494,6 +495,13 @@
        return false
 }
 
+func (x *Statement) GetForceStall() bool {
+       if x != nil {
+               return x.ForceStall
+       }
+       return false
+}
+
 type Request struct {
        state           protoimpl.MessageState `protogen:"open.v1"`
        Transaction     bool                   `protobuf:"varint,1,opt,name=transaction,proto3" json:"transaction,omitempty"`
@@ -2086,7 +2094,7 @@
        "\x01y\x18\x04 \x01(\fH\x00R\x01y\x12\x0e\n" +
        "\x01s\x18\x05 \x01(\tH\x00R\x01s\x12\x12\n" +
        "\x04name\x18\x06 \x01(\tR\x04nameB\a\n" +
-       "\x05value\"q\n" +
+       "\x05value\"\x91\x01\n" +
        "\tStatement\x12\x10\n" +
        "\x03sql\x18\x01 \x01(\tR\x03sql\x122\n" +
        "\n" +
@@ -2094,7 +2102,10 @@
        "parameters\x12\x1e\n" +
        "\n" +
        "forceQuery\x18\x03 \x01(\bR\n" +
-       "forceQuery\"\xa7\x01\n" +
+       "forceQuery\x12\x1e\n" +
+       "\n" +
+       "forceStall\x18\x04 \x01(\bR\n" +
+       "forceStall\"\xa7\x01\n" +
        "\aRequest\x12 \n" +
        "\vtransaction\x18\x01 \x01(\bR\vtransaction\x122\n" +
        "\n" +
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/command/proto/command.proto new/rqlite-9.3.9/command/proto/command.proto
--- old/rqlite-9.3.6/command/proto/command.proto        2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/command/proto/command.proto        2026-01-04 08:09:34.000000000 +0100
@@ -19,6 +19,7 @@
        string sql = 1;
        repeated Parameter parameters = 2;
        bool forceQuery = 3;
+       bool forceStall = 4;
 }
 
 message Request {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/db/db.go new/rqlite-9.3.9/db/db.go
--- old/rqlite-9.3.6/db/db.go   2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/db/db.go   2026-01-04 08:09:34.000000000 +0100
@@ -166,6 +166,23 @@
        MaxLifetimeClosed  int64         `json:"max_lifetime_closed"`
 }
 
+// CheckpointMeta contains metadata about a WAL checkpoint operation.
+type CheckpointMeta struct {
+       Code  int
+       Pages int
+       Moved int
+}
+
+// String returns a string representation of the CheckpointMeta.
+func (cm *CheckpointMeta) String() string {
+       return fmt.Sprintf("Code=%d, Pages=%d, Moved=%d", cm.Code, cm.Pages, cm.Moved)
+}
+
+// Success returns true if the checkpoint completed successfully.
+func (cm *CheckpointMeta) Success() bool {
+       return cm.Code == 0
+}
+
 // Open opens a file-based database using the default driver.
 func Open(dbPath string, fkEnabled, wal bool) (retDB *DB, retErr error) {
        return OpenWithDriver(DefaultDriver(), dbPath, fkEnabled, wal)
@@ -641,15 +658,15 @@
 
 // Checkpoint checkpoints the WAL file. If the WAL file is not enabled, this
 // function is a no-op.
-func (db *DB) Checkpoint(mode CheckpointMode) error {
-       return db.CheckpointWithTimeout(mode, 0)
+func (db *DB) Checkpoint(mode CheckpointMode) (*CheckpointMeta, error) {
+       return db.CheckpointWithTimeout(mode, 100)
 }
 
 // CheckpointWithTimeout performs a WAL checkpoint. If the checkpoint does not
 // run to completion within the given duration, an error is returned. If the
 // duration is 0, the busy timeout is not modified before executing the
 // checkpoint.
-func (db *DB) CheckpointWithTimeout(mode CheckpointMode, dur time.Duration) (err error) {
+func (db *DB) CheckpointWithTimeout(mode CheckpointMode, dur time.Duration) (meta *CheckpointMeta, err error) {
        start := time.Now()
        defer func() {
                if err != nil {
@@ -663,10 +680,10 @@
        if dur > 0 {
                rwBt, _, err := db.BusyTimeout()
                if err != nil {
-                       return fmt.Errorf("failed to get busy_timeout on 
checkpointing connection: %s", err.Error())
+                       return nil, fmt.Errorf("failed to get busy_timeout on 
checkpointing connection: %s", err.Error())
                }
                if err := db.SetBusyTimeout(int(dur.Milliseconds()), -1); err 
!= nil {
-                       return fmt.Errorf("failed to set busy_timeout on 
checkpointing connection: %s", err.Error())
+                       return nil, fmt.Errorf("failed to set busy_timeout on 
checkpointing connection: %s", err.Error())
                }
                defer func() {
                        // Reset back to default
@@ -678,15 +695,15 @@
 
        ok, nPages, nMoved, err := checkpointDB(db.rwDB, mode)
        if err != nil {
-               return fmt.Errorf("error checkpointing WAL: %s", err.Error())
+               return nil, fmt.Errorf("error checkpointing WAL: %s", 
err.Error())
        }
        stats.Add(numCheckpointedPages, int64(nPages))
        stats.Add(numCheckpointedMoves, int64(nMoved))
-       if ok != 0 {
-               return fmt.Errorf("failed to completely checkpoint WAL (%d ok, 
%d pages, %d moved)",
-                       ok, nPages, nMoved)
-       }
-       return nil
+       return &CheckpointMeta{
+               Code:  ok,
+               Pages: nPages,
+               Moved: nMoved,
+       }, nil
 }
 
 // DisableCheckpointing disables the automatic checkpointing that occurs when
@@ -1093,19 +1110,23 @@
 
 // Query executes queries that return rows, but don't modify the database.
 func (db *DB) Query(req *command.Request, xTime bool) ([]*command.QueryRows, error) {
-       stats.Add(numQueries, int64(len(req.Statements)))
-       conn, err := db.roDB.Conn(context.Background())
-       if err != nil {
-               return nil, err
-       }
-       defer conn.Close()
-
        ctx := context.Background()
        if req.DbTimeout > 0 {
                var cancel context.CancelFunc
                ctx, cancel = context.WithTimeout(ctx, time.Duration(req.DbTimeout))
                defer cancel()
        }
+       return db.QueryWithContext(ctx, req, xTime)
+}
+
+// QueryWithContext executes queries that return rows, but don't modify the database.
+func (db *DB) QueryWithContext(ctx context.Context, req *command.Request, xTime bool) ([]*command.QueryRows, error) {
+       stats.Add(numQueries, int64(len(req.Statements)))
+       conn, err := db.roDB.Conn(ctx)
+       if err != nil {
+               return nil, err
+       }
+       defer conn.Close()
        return db.queryWithConn(ctx, req, xTime, conn)
 }
 
@@ -1183,6 +1204,7 @@
        }()
        rows := &command.QueryRows{}
        start := time.Now()
+       forceStall := stmt.ForceStall
 
        parameters, err := parametersToValues(stmt.Parameters)
        if err != nil {
@@ -1233,6 +1255,18 @@
                        Parameters: params,
                })
 
+               // Check for slow query, blocked query, etc testing. This field
+               // should never be set by production code and is only for fault-injection
+               // testing purposes.
+               if forceStall {
+                       select {
+                       case <-make(chan struct{}):
+                       case <-ctx.Done():
+                               db.logger.Printf("forced stall on query cancelled: %s", ctx.Err().Error())
+                               forceStall = false
+                       }
+               }
+
                // One-time population of any empty types. Best effort, ignore
                // error.
                if needsQueryTypes {
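
Taken together, the db.go changes above rework the Checkpoint API (it now reports status via *CheckpointMeta instead of treating an incomplete checkpoint as an error) and add a context-aware query path. A hedged sketch of how a caller might use the new signatures, based only on what is visible in this diff; the database path and statement are illustrative:

package main

import (
    "context"
    "log"
    "time"

    command "github.com/rqlite/rqlite/v9/command/proto"
    sql "github.com/rqlite/rqlite/v9/db"
)

func main() {
    // Open a WAL-mode database (path illustrative, foreign keys off, WAL on).
    db, err := sql.Open("/tmp/example.db", false, true)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Checkpoint now returns metadata rather than failing outright
    // when the WAL cannot be fully checkpointed.
    meta, err := db.Checkpoint(sql.CheckpointTruncate)
    if err != nil {
        log.Fatalf("checkpoint error: %v", err)
    }
    if !meta.Success() {
        // e.g. a long-running read is blocking the checkpoint; the WAL
        // is unchanged in that case, so this is safe to retry later.
        log.Printf("checkpoint incomplete: %s", meta.String())
    }

    // Queries can now be bounded by a caller-supplied context.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    req := &command.Request{
        Statements: []*command.Statement{{Sql: "SELECT 1"}},
    }
    if _, err := db.QueryWithContext(ctx, req, false); err != nil {
        log.Printf("query failed: %v", err)
    }
}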
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/db/db_checkpoint_test.go new/rqlite-9.3.9/db/db_checkpoint_test.go
--- old/rqlite-9.3.6/db/db_checkpoint_test.go   2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/db/db_checkpoint_test.go   2026-01-04 08:09:34.000000000 +0100
@@ -2,12 +2,15 @@
 
 import (
        "bytes"
+       "context"
        "io"
        "os"
        "testing"
        "time"
 
+       command "github.com/rqlite/rqlite/v9/command/proto"
        "github.com/rqlite/rqlite/v9/db/wal"
+       "github.com/rqlite/rqlite/v9/internal/rsum"
 )
 
 // Test_WALDatabaseCheckpointOKNoWrites tests that a checkpoint succeeds
@@ -22,9 +25,194 @@
                t.Fatalf("failed to open database in WAL mode: %s", err.Error())
        }
        defer db.Close()
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       meta, err := db.Checkpoint(CheckpointTruncate)
+       if err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode with 
nonexistent WAL: %s", err.Error())
        }
+       if !meta.Success() {
+               t.Fatalf("expected checkpoint to complete successfully")
+       }
+       if meta.Moved != 0 {
+               t.Fatalf("expected MOVED to be 0, got %d", meta.Moved)
+       }
+       if meta.Pages != 0 {
+               t.Fatalf("expected PAGES to be 0, got %d", meta.Pages)
+       }
+}
+
+// Test_WALDatabaseCheckpointOK tests that a checkpoint succeeds
+// with a write.
+func Test_WALDatabaseCheckpointOK(t *testing.T) {
+       path := mustTempFile()
+       defer os.Remove(path)
+
+       db, err := Open(path, false, true)
+       if err != nil {
+               t.Fatalf("failed to open database in WAL mode: %s", err.Error())
+       }
+       defer db.Close()
+
+       _, err = db.ExecuteStringStmt(`CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)`)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       meta, err := db.Checkpoint(CheckpointTruncate)
+       if err != nil {
+               t.Fatalf("failed to checkpoint database: %s", err.Error())
+       }
+       if !meta.Success() {
+               t.Fatalf("expected checkpoint to complete successfully")
+       }
+       if meta.Moved != 0 {
+               t.Fatalf("expected MOVED to be 0 since WAL was truncated, got 
%d", meta.Moved)
+       }
+       if meta.Pages != 0 {
+               t.Fatalf("expected PAGES to be 0 since WAL was truncated, got 
%d", meta.Pages)
+       }
+
+       // Ensure idempotency by checkpointing again.
+       meta, err = db.Checkpoint(CheckpointTruncate)
+       if err != nil {
+               t.Fatalf("failed to checkpoint database: %s", err.Error())
+       }
+       if !meta.Success() {
+               t.Fatalf("expected checkpoint to complete successfully")
+       }
+}
+
+func Test_WALDatabaseCheckpointFail_Blocked(t *testing.T) {
+       path := mustTempFile()
+       defer os.Remove(path)
+
+       db, err := Open(path, false, true)
+       if err != nil {
+               t.Fatalf("failed to open database in WAL mode: %s", err.Error())
+       }
+       defer db.Close()
+
+       _, err = db.ExecuteStringStmt(`CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)`)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+       _, err = db.ExecuteStringStmt(`INSERT INTO foo(name) VALUES("alice")`)
+       if err != nil {
+               t.Fatalf("failed to execute INSERT on single node: %s", err.Error())
+       }
+
+       // Issue a long-running read that should block the checkpoint.
+       qr := &command.Request{
+               Statements: []*command.Statement{
+                       {
+                               Sql:        "SELECT * FROM foo",
+                               ForceStall: true,
+                       },
+               },
+       }
+       ctx, cancelFunc := context.WithCancel(context.Background())
+       go func() {
+               db.QueryWithContext(ctx, qr, false)
+       }()
+       time.Sleep(2 * time.Second)
+       meta, err := db.Checkpoint(CheckpointTruncate)
+       if err != nil {
+               t.Fatalf("failed to checkpoint database: %s", err.Error())
+       }
+       if meta.Success() {
+               t.Fatalf("expected checkpoint to be unsuccessful due to 
blocking read")
+       }
+
+       // Cancel the blocking read, and try again.
+       cancelFunc()
+       time.Sleep(2 * time.Second)
+
+       meta, err = db.Checkpoint(CheckpointTruncate)
+       if err != nil {
+               t.Fatalf("failed to checkpoint database: %s", err.Error())
+       }
+       if !meta.Success() {
+               t.Fatalf("expected checkpoint to be successful after blocking 
read was cancelled")
+       }
+}
+
+// Test_WALDatabaseCheckpointOK_NoWALChange tests that a checkpoint
+// that is blocked by a long-running read does not result in a
+// change to the WAL file. This is to show that we can safely retry
+// the truncate checkpoint later.
+func Test_WALDatabaseCheckpointOK_NoWALChange(t *testing.T) {
+       path := mustTempFile()
+       defer os.Remove(path)
+
+       db, err := Open(path, false, true)
+       if err != nil {
+               t.Fatalf("failed to open database in WAL mode: %s", err.Error())
+       }
+       defer db.Close()
+
+       _, err = db.ExecuteStringStmt(`CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)`)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+       _, err = db.ExecuteStringStmt(`INSERT INTO foo(name) VALUES("alice")`)
+       if err != nil {
+               t.Fatalf("failed to execute INSERT on single node: %s", err.Error())
+       }
+
+       // Issue a long-running read that should block the checkpoint.
+       qr := &command.Request{
+               Statements: []*command.Statement{
+                       {
+                               Sql:        "SELECT * FROM foo",
+                               ForceStall: true,
+                       },
+               },
+       }
+       go func() {
+               db.Query(qr, false)
+       }()
+       time.Sleep(2 * time.Second)
+
+       _, err = db.ExecuteStringStmt(`INSERT INTO foo(name) VALUES("alice")`)
+       if err != nil {
+               t.Fatalf("failed to execute INSERT on single node: %s", 
err.Error())
+       }
+
+       // Get the hash of the WAL file before the checkpoint.
+       h1, err := rsum.MD5(db.WALPath())
+       if err != nil {
+               t.Fatalf("failed to hash WAL file: %s", err.Error())
+       }
+
+       _, err = db.ExecuteStringStmt(`PRAGMA BUSY_TIMEOUT = 1`)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+       meta, err := db.Checkpoint(CheckpointTruncate)
+       if err != nil {
+               t.Fatalf("failed to checkpoint database: %s", err.Error())
+       }
+       if meta.Success() {
+               t.Fatalf("expected checkpoint to be unsuccessful due to 
blocking read")
+       }
+       if meta.Moved == 0 {
+               t.Fatalf("expected MOVED to be > 0 since some pages should have 
been moved")
+       }
+       if meta.Pages == 0 {
+               t.Fatalf("expected PAGES to be > 0 since WAL should have pages")
+       }
+       if meta.Moved >= meta.Pages {
+               t.Fatalf("expected MOVED to be < PAGES since checkpoint 
incomplete")
+       }
+
+       // Check hash again.
+       h2, err := rsum.MD5(db.WALPath())
+       if err != nil {
+               t.Fatalf("failed to hash WAL file: %s", err.Error())
+       }
+
+       if h1 != h2 {
+               t.Fatalf("expected WAL file to be unchanged after incomplete 
checkpoint")
+       }
 }
 
 // Test_WALDatabaseCheckpointOKDelete tests that a checkpoint returns no error
@@ -41,7 +229,7 @@
                t.Fatalf("WAL mode enabled")
        }
        defer db.Close()
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in DELETE mode: %s", 
err.Error())
        }
 }
@@ -71,8 +259,14 @@
        }
 
        walPreBytes := mustReadBytes(db.WALPath())
-       if err := db.Checkpoint(CheckpointRestart); err != nil {
+       if meta, err := db.Checkpoint(CheckpointRestart); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
+       } else if !meta.Success() {
+               t.Fatalf("expected checkpoint to complete successfully")
+       } else if meta.Moved == 0 {
+               t.Fatalf("expected some pages to be moved during RESTART 
checkpoint")
+       } else if meta.Pages == 0 {
+               t.Fatalf("expected some pages to be in the WAL during RESTART 
checkpoint")
        }
        walPostBytes := mustReadBytes(db.WALPath())
        if !bytes.Equal(walPreBytes, walPostBytes) {
@@ -88,8 +282,12 @@
                t.Fatalf("expected %s, got %s", exp, got)
        }
 
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       if meta, err := db.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
+       } else if !meta.Success() {
+               t.Fatalf("expected checkpoint to complete successfully")
+       } else if meta.Moved != 0 {
+               t.Fatalf("expected 0 pages to be moved during checkpoint 
truncate since nowrite since restart checkpoint")
        }
        sz, err := fileSize(db.WALPath())
        if err != nil {
@@ -148,8 +346,12 @@
                t.Fatalf("expected %s, got %s", exp, got)
        }
 
-       if err := db.CheckpointWithTimeout(CheckpointRestart, 250*time.Millisecond); err == nil {
-               t.Fatal("expected error due to failure to checkpoint")
+       meta, err := db.CheckpointWithTimeout(CheckpointRestart, 250*time.Millisecond)
+       if err != nil {
+               t.Fatal("expected no error when checkpoint times out due to a blocking read transaction")
+       }
+       if meta.Success() {
+               t.Fatal("expected checkpoint to be unsuccessful")
        }
 
        // Get some information on the WAL file before the checkpoint. The goal here is
@@ -174,7 +376,7 @@
        }
 
        blockingDB.Close()
-       if err := db.CheckpointWithTimeout(CheckpointRestart, 250*time.Millisecond); err != nil {
+       if _, err := db.CheckpointWithTimeout(CheckpointRestart, 250*time.Millisecond); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
 }
@@ -220,9 +422,14 @@
                t.Fatalf("expected %s, got %s", exp, got)
        }
 
-       if err := db.CheckpointWithTimeout(CheckpointTruncate, 250*time.Millisecond); err == nil {
-               t.Fatal("expected error due to failure to checkpoint")
+       meta, err := db.CheckpointWithTimeout(CheckpointRestart, 250*time.Millisecond)
+       if err != nil {
+               t.Fatal("expected no error due to failure to checkpoint due to COMMIT")
+       }
+       if meta.Success() {
+               t.Fatal("expected checkpoint to be unsuccessful")
        }
+
        postWALBytes := mustReadBytes(db.WALPath())
        if !bytes.Equal(preWALBytes, postWALBytes) {
                t.Fatalf("wal file should be unchanged after checkpoint 
failure")
@@ -248,7 +455,7 @@
        }
 
        blockingDB.Close()
-       if err := db.CheckpointWithTimeout(CheckpointTruncate, 250*time.Millisecond); err != nil {
+       if _, err := db.CheckpointWithTimeout(CheckpointTruncate, 250*time.Millisecond); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
        if mustFileSize(db.WALPath()) != 0 {
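
The ForceStall plumbing exercised in the tests above relies on a small trick visible in the db.go hunk earlier: selecting on a channel that nothing ever writes to, so the branch blocks until the context is cancelled. A standalone sketch of that pattern (the function name is illustrative, not from rqlite):

package main

import (
    "context"
    "fmt"
    "time"
)

// stallUntilCancelled blocks until ctx is cancelled: receiving from a
// freshly made channel can never proceed, so the select only returns
// via ctx.Done().
func stallUntilCancelled(ctx context.Context) error {
    select {
    case <-make(chan struct{}): // never ready
    case <-ctx.Done():
        return ctx.Err()
    }
    return nil
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    fmt.Println(stallUntilCancelled(ctx)) // prints: context deadline exceeded
}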
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/db/db_test.go new/rqlite-9.3.9/db/db_test.go
--- old/rqlite-9.3.6/db/db_test.go      2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/db/db_test.go      2026-01-04 08:09:34.000000000 +0100
@@ -166,7 +166,7 @@
        }
 
        // Checkpoint to table exists in main database file.
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
 
@@ -299,7 +299,7 @@
        testQ()
 
        // Confirm checkpoint works without error.
-       if err := db.Checkpoint(CheckpointRestart); err != nil {
+       if _, err := db.Checkpoint(CheckpointRestart); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
        testQ()
@@ -381,7 +381,7 @@
                t.Fatalf("WAL sum did not change after insertion")
        }
 
-       if err := db.Checkpoint(CheckpointRestart); err != nil {
+       if _, err := db.Checkpoint(CheckpointRestart); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
 
@@ -439,7 +439,7 @@
        // Checkpoint, and check time is later. On some platforms the time resolution isn't that
        // high, so we sleep so the test won't suffer a false failure.
        time.Sleep(1 * time.Second)
-       if err := db.Checkpoint(CheckpointRestart); err != nil {
+       if _, err := db.Checkpoint(CheckpointRestart); err != nil {
                t.Fatalf("failed to checkpoint database: %s", err.Error())
        }
        lm3, err := db.LastModified()
@@ -944,14 +944,14 @@
                t.Fatalf("WAL file exists but is empty")
        }
 
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
        if mustFileSize(walPath) != 0 {
                t.Fatalf("WAL file exists but is non-empty")
        }
        // Checkpoint a second time, to ensure it's idempotent.
-       if err := db.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
 }
@@ -1081,7 +1081,7 @@
 
        // Confirm checkpoint works on all types of on-disk databases. Worst case, this
        // should be ignored.
-       if err := db.Checkpoint(CheckpointRestart); err != nil {
+       if _, err := db.Checkpoint(CheckpointRestart); err != nil {
                t.Fatalf("failed to checkpoint database in DELETE mode: %s", 
err.Error())
        }
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/db/state_test.go new/rqlite-9.3.9/db/state_test.go
--- old/rqlite-9.3.6/db/state_test.go   2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/db/state_test.go   2026-01-04 08:09:34.000000000 +0100
@@ -606,7 +606,7 @@
                }
                mustCopyFile(replayDBPath, dbPath)
                mustCopyFile(filepath.Join(replayDir, walFile+"_001"), walPath)
-               if err := db.Checkpoint(CheckpointTruncate); err != nil {
+               if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                        t.Fatalf("failed to checkpoint database in WAL mode: 
%s", err.Error())
                }
 
@@ -619,7 +619,7 @@
                        t.Fatalf("WAL file at %s does not exist", walPath)
                }
                mustCopyFile(filepath.Join(replayDir, walFile+"_002"), walPath)
-               if err := db.Checkpoint(CheckpointTruncate); err != nil {
+               if _, err := db.Checkpoint(CheckpointTruncate); err != nil {
                        t.Fatalf("failed to checkpoint database in WAL mode: 
%s", err.Error())
                }
 
@@ -705,7 +705,7 @@
        if _, err := srcDB.ExecuteStringStmt("CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)"); err != nil {
                t.Fatalf("failed to create table: %s", err.Error())
        }
-       if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
        mustCopyFile(dstPath, srcPath)
@@ -737,7 +737,7 @@
                dstWALPath := fmt.Sprintf("%s-%d", dstPath, i)
                mustCopyFile(dstWALPath, srcWALPath)
                dstWALs = append(dstWALs, dstWALPath)
-               if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+               if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                        t.Fatalf("failed to checkpoint database in WAL mode: 
%s", err.Error())
                }
        }
@@ -753,7 +753,7 @@
        dstWALPath := fmt.Sprintf("%s-postdelete", dstPath)
        mustCopyFile(dstWALPath, srcWALPath)
        dstWALs = append(dstWALs, dstWALPath)
-       if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
 
@@ -763,7 +763,7 @@
        dstWALPath = fmt.Sprintf("%s-postupdate", dstPath)
        mustCopyFile(dstWALPath, srcWALPath)
        dstWALs = append(dstWALs, dstWALPath)
-       if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
 
@@ -778,7 +778,7 @@
        dstWALPath = fmt.Sprintf("%s-create-tables", dstPath)
        mustCopyFile(dstWALPath, srcWALPath)
        dstWALs = append(dstWALs, dstWALPath)
-       if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
 
@@ -791,7 +791,7 @@
        dstWALPath = fmt.Sprintf("%s-post-create-tables", dstPath)
        mustCopyFile(dstWALPath, srcWALPath)
        dstWALs = append(dstWALs, dstWALPath)
-       if err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
+       if _, err := srcDB.Checkpoint(CheckpointTruncate); err != nil {
                t.Fatalf("failed to checkpoint database in WAL mode: %s", 
err.Error())
        }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/db/swappable_db.go new/rqlite-9.3.9/db/swappable_db.go
--- old/rqlite-9.3.6/db/swappable_db.go 2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/db/swappable_db.go 2026-01-04 08:09:34.000000000 +0100
@@ -135,7 +135,7 @@
 }
 
 // Checkpoint calls Checkpoint on the underlying database.
-func (s *SwappableDB) Checkpoint(mode CheckpointMode) error {
+func (s *SwappableDB) Checkpoint(mode CheckpointMode) (*CheckpointMeta, error) {
        s.dbMu.RLock()
        defer s.dbMu.RUnlock()
        return s.db.Checkpoint(mode)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/go.mod new/rqlite-9.3.9/go.mod
--- old/rqlite-9.3.6/go.mod     2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/go.mod     2026-01-04 08:09:34.000000000 +0100
@@ -6,11 +6,11 @@
        github.com/aws/aws-sdk-go-v2 v1.41.0
        github.com/aws/aws-sdk-go-v2/config v1.32.6
        github.com/aws/aws-sdk-go-v2/credentials v1.19.6
-       github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.17
-       github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0
+       github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18
+       github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
        github.com/hashicorp/go-hclog v1.6.3
        github.com/hashicorp/raft v1.7.3
-       github.com/mattn/go-sqlite3 v1.14.32
+       github.com/mattn/go-sqlite3 v1.14.33
        github.com/mkideal/cli v0.2.7
        github.com/mkideal/pkg v0.1.3
        github.com/peterh/liner v1.2.2
@@ -47,7 +47,7 @@
        github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
        github.com/gogo/protobuf v1.3.2 // indirect
        github.com/golang/protobuf v1.5.4 // indirect
-       github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
+       github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect
        github.com/hashicorp/consul/api v1.33.0 // indirect
        github.com/hashicorp/errwrap v1.1.0 // indirect
        github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -82,5 +82,5 @@
 
 replace (
        github.com/armon/go-metrics => github.com/hashicorp/go-metrics v0.5.1
-       github.com/mattn/go-sqlite3 => github.com/rqlite/go-sqlite3 v1.42.0
+       github.com/mattn/go-sqlite3 => github.com/rqlite/go-sqlite3 v1.43.0
 )
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/go.sum new/rqlite-9.3.9/go.sum
--- old/rqlite-9.3.6/go.sum     2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/go.sum     2026-01-04 08:09:34.000000000 +0100
@@ -15,8 +15,8 @@
 github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.17 h1:fODjlj9c1zIfZYFxdC6Z4GX/plrZUYI/5EklgA/24Hw=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.17/go.mod h1:CEyBu8kavY5Tc8i/8A810DuKydd19Lrx2/TmcNdjOAk=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18 h1:9vWXHtaepwoAl/UuKzxwgOoJDXPCC3hvgNMfcmdS2Tk=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18/go.mod h1:sKuUZ+MwUTuJbYvZ8pK0x10LvgcJK3Y4rmh63YBekwk=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
@@ -33,8 +33,8 @@
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0 h1:SWTxh/EcUCDVqi/0s26V6pVUq0BBG7kx0tDTmF/hCgA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.94.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
 github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
 github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
 github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
@@ -109,8 +109,8 @@
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
 github.com/hashicorp/consul/api v1.33.0 h1:MnFUzN1Bo6YDGi/EsRLbVNgA4pyCymmcswrE5j4OHBM=
 github.com/hashicorp/consul/api v1.33.0/go.mod h1:vLz2I/bqqCYiG0qRHGerComvbwSWKswc8rRFtnYBrIw=
 github.com/hashicorp/consul/sdk v0.17.0 h1:N/JigV6y1yEMfTIhXoW0DXUecM2grQnFuRpY7PcLHLI=
@@ -237,8 +237,8 @@
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rqlite/go-sqlite3 v1.42.0 h1:xfOaauBRqJyww/6g2z910/p3gyw/vjb2GgQYKfrZSrU=
-github.com/rqlite/go-sqlite3 v1.42.0/go.mod h1:R9H7CatgYBt3c+fSV/5yo2vLh4ZjCB0aMHdkv69fP4A=
+github.com/rqlite/go-sqlite3 v1.43.0 h1:D1dno0cZ/ex99XME+R1r1MAp/XlvMWbFvx4KHO63S8Y=
+github.com/rqlite/go-sqlite3 v1.43.0/go.mod h1:R9H7CatgYBt3c+fSV/5yo2vLh4ZjCB0aMHdkv69fP4A=
 github.com/rqlite/raft-boltdb/v2 v2.0.0-20230523104317-c08e70f4de48 h1:NZ62M+kT0JqhyFUMc8I4SMmfmD4NGJxhb2ePJQXjryc=
 github.com/rqlite/raft-boltdb/v2 v2.0.0-20230523104317-c08e70f4de48/go.mod h1:CRnsxgy5G8fAf5J+AM0yrsSdxXHKkIYOaq2sm+Q4DYc=
 github.com/rqlite/rqlite-disco-clients v0.0.0-20250205044118-8ada2b350099 h1:5cqkVLdl6sGJSY3kiF2dqaA3bD+8OS5FUdZqO0BxXLU=
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/internal/rsync/multir_singlew.go new/rqlite-9.3.9/internal/rsync/multir_singlew.go
--- old/rqlite-9.3.6/internal/rsync/multir_singlew.go   2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/internal/rsync/multir_singlew.go   2026-01-04 08:09:34.000000000 +0100
@@ -1,21 +1,31 @@
 package rsync
 
 import (
-       "errors"
+       "fmt"
        "sync"
 )
 
-var (
-       // ErrMRSWConflict is returned when a MultiRSW operation fails.
-       ErrMRSWConflict = errors.New("MRSW conflict")
-)
+// ErrMRSWConflict is returned when a MultiRSW operation fails.
+type ErrMRSWConflict struct {
+       msg string
+}
+
+// Error implements the error interface for ErrMRSWConflict.
+func (e *ErrMRSWConflict) Error() string {
+       return e.msg
+}
+
+// NewErrMRSWConflict creates a new ErrMRSWConflict with the given message.
+func NewErrMRSWConflict(m string) error {
+       return &ErrMRSWConflict{msg: m}
+}
 
 // MultiRSW is a simple concurrency control mechanism that allows
 // multiple readers or a single writer to execute a critical section at a time.
 type MultiRSW struct {
-       writerActive bool
-       numReaders   int
-       mu           sync.Mutex
+       owner      string
+       numReaders int
+       mu         sync.Mutex
 }
 
 // NewMultiRSW creates a new MultiRSW instance.
@@ -27,8 +37,8 @@
 func (r *MultiRSW) BeginRead() error {
        r.mu.Lock()
        defer r.mu.Unlock()
-       if r.writerActive {
-               return ErrMRSWConflict
+       if r.owner != "" {
+               return NewErrMRSWConflict("MSRW conflict owner: " + r.owner)
        }
        r.numReaders++
        return nil
@@ -45,13 +55,19 @@
 }
 
 // BeginWrite attempts to enter the critical section as a writer.
-func (r *MultiRSW) BeginWrite() error {
+func (r *MultiRSW) BeginWrite(owner string) error {
        r.mu.Lock()
        defer r.mu.Unlock()
-       if r.writerActive || r.numReaders > 0 {
-               return ErrMRSWConflict
+       if owner == "" {
+               panic("owner cannot be empty")
+       }
+       if r.owner != "" {
+               return NewErrMRSWConflict("MSRW conflict owner: " + r.owner)
        }
-       r.writerActive = true
+       if r.numReaders > 0 {
+               return NewErrMRSWConflict(fmt.Sprintf("MSRW conflict %d readers active", r.numReaders))
+       }
+       r.owner = owner
        return nil
 }
 
@@ -59,25 +75,28 @@
 func (r *MultiRSW) EndWrite() {
        r.mu.Lock()
        defer r.mu.Unlock()
-       if !r.writerActive {
+       if r.owner == "" {
                panic("write done received but no write is active")
        }
-       r.writerActive = false
+       r.owner = ""
 }
 
 // UpgradeToWriter attempts to upgrade a read lock to a write lock. The
 // client must be the only reader in order to upgrade, and must already
 // be in a read lock.
-func (r *MultiRSW) UpgradeToWriter() error {
+func (r *MultiRSW) UpgradeToWriter(owner string) error {
        r.mu.Lock()
        defer r.mu.Unlock()
-       if r.writerActive || r.numReaders > 1 {
-               return ErrMRSWConflict
+       if r.owner != "" {
+               return NewErrMRSWConflict("MSRW conflict owner: " + r.owner)
+       }
+       if r.numReaders > 1 {
+               return NewErrMRSWConflict(fmt.Sprintf("MSRW conflict %d readers active", r.numReaders))
        }
        if r.numReaders == 0 {
                panic("upgrade attempted with no readers")
        }
-       r.writerActive = true
+       r.owner = owner
        r.numReaders = 0
        return nil
 }
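
The reworked MultiRSW now records an owner string for the active writer and returns a typed *ErrMRSWConflict naming that owner, which is what the snapshot store_test.go changes later in this mail check with errors.As. A small usage sketch based on the API in this diff; the owner names are illustrative:

package main

import (
    "errors"
    "fmt"

    "github.com/rqlite/rqlite/v9/internal/rsync"
)

func main() {
    m := rsync.NewMultiRSW()

    // Writers now identify themselves, so a conflicting caller can
    // see who holds the lock.
    if err := m.BeginWrite("snapshot-create"); err != nil {
        panic(err)
    }

    // A second writer is rejected with a typed error that carries
    // the owner of the conflicting operation.
    err := m.BeginWrite("backup")
    var conflict *rsync.ErrMRSWConflict
    if errors.As(err, &conflict) {
        fmt.Println("conflict:", conflict.Error())
    }
    m.EndWrite()
}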
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/internal/rsync/multir_singlew_test.go new/rqlite-9.3.9/internal/rsync/multir_singlew_test.go
--- old/rqlite-9.3.6/internal/rsync/multir_singlew_test.go      2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/internal/rsync/multir_singlew_test.go      2026-01-04 08:09:34.000000000 +0100
@@ -14,20 +14,20 @@
        r.EndRead()
 
        // Test successful write lock
-       if err := r.BeginWrite(); err != nil {
+       if err := r.BeginWrite("owner1"); err != nil {
                t.Fatalf("Failed to acquire write lock: %v", err)
        }
        r.EndWrite()
 
        // Test that a write blocks other writers and readers.
-       err := r.BeginWrite()
+       err := r.BeginWrite("owner2")
        if err != nil {
                t.Fatalf("Failed to acquire write lock in goroutine: %v", err)
        }
        if err := r.BeginRead(); err == nil {
                t.Fatalf("Expected error when reading during active write, got 
none")
        }
-       if err := r.BeginWrite(); err == nil {
+       if err := r.BeginWrite("owner3"); err == nil {
                t.Fatalf("Expected error when writing during active write, got 
none")
        }
        r.EndWrite()
@@ -37,7 +37,7 @@
        if err != nil {
                t.Fatalf("Failed to acquire read lock in goroutine: %v", err)
        }
-       if err := r.BeginWrite(); err == nil {
+       if err := r.BeginWrite("owner4"); err == nil {
                t.Fatalf("Expected error when writing during active read, got 
none")
        }
        r.EndRead()
@@ -61,7 +61,7 @@
        if err := r.BeginRead(); err != nil {
                t.Fatalf("Failed to acquire read lock: %v", err)
        }
-       if err := r.UpgradeToWriter(); err != nil {
+       if err := r.UpgradeToWriter("owner11"); err != nil {
                t.Fatalf("Failed to upgrade to write lock: %v", err)
        }
        r.EndWrite()
@@ -73,7 +73,7 @@
        if err := r.BeginRead(); err != nil {
                t.Fatalf("Failed to acquire read lock: %v", err)
        }
-       if err := r.UpgradeToWriter(); err == nil {
+       if err := r.UpgradeToWriter("owner5"); err == nil {
                t.Fatalf("Expected error when upgrading with multiple readers, 
got none")
        }
        r.EndRead()
@@ -83,19 +83,19 @@
        if err := r.BeginRead(); err != nil {
                t.Fatalf("Failed to acquire read lock: %v", err)
        }
-       if err := r.UpgradeToWriter(); err != nil {
+       if err := r.UpgradeToWriter("owner6"); err != nil {
                t.Fatalf("Failed to upgrade to write lock: %v", err)
        }
-       if err := r.UpgradeToWriter(); err == nil {
+       if err := r.UpgradeToWriter("owner7"); err == nil {
                t.Fatalf("Expected error when double-ugrading, got none")
        }
        r.EndWrite()
 
        // Test that upgrades are blocked by other writers
-       if err := r.BeginWrite(); err != nil {
+       if err := r.BeginWrite("owner8"); err != nil {
                t.Fatalf("Failed to acquire write lock: %v", err)
        }
-       if err := r.UpgradeToWriter(); err == nil {
+       if err := r.UpgradeToWriter("owner9"); err == nil {
                t.Fatalf("Expected error when upgrading with an active writer, 
got none")
        }
        r.EndWrite()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/snapshot/store.go new/rqlite-9.3.9/snapshot/store.go
--- old/rqlite-9.3.6/snapshot/store.go  2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/snapshot/store.go  2026-01-04 08:09:34.000000000 +0100
@@ -64,6 +64,7 @@
 
        mu     sync.Mutex
        closed bool
+       logger *log.Logger
 }
 
 // NewLockingSink returns a new LockingSink.
@@ -71,6 +72,7 @@
        return &LockingSink{
                SnapshotSink: sink,
                str:          str,
+               logger:       log.New(os.Stderr, "[snapshot-locking-sink] ", log.LstdFlags),
        }
 }
 
@@ -88,6 +90,9 @@
 
 // Cancel cancels the sink, unlocking the Store for creation of a new sink.
 func (s *LockingSink) Cancel() error {
+       defer func() {
+               s.logger.Printf("sink %s canceled", s.ID())
+       }()
        s.mu.Lock()
        defer s.mu.Unlock()
        if s.closed {
@@ -171,7 +176,7 @@
 // be a problem, since snapshots are taken infrequently and one at a time.
 func (s *Store) Create(version raft.SnapshotVersion, index, term uint64, configuration raft.Configuration,
        configurationIndex uint64, trans raft.Transport) (retSink raft.SnapshotSink, retErr error) {
-       if err := s.mrsw.BeginWrite(); err != nil {
+       if err := s.mrsw.BeginWrite(fmt.Sprintf("snapshot-create-sink:%s", snapshotName(term, index))); err != nil {
                stats.Add(snapshotCreateMRSWFail, 1)
                return nil, err
        }
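
The LockingSink change above defers its log message in Cancel so the cancellation is recorded on every exit path. Here is the same wrapper pattern in isolation; loggingSink is an illustrative type for this mail, not rqlite's actual LockingSink:

package snapshotutil

import (
    "log"

    "github.com/hashicorp/raft"
)

// loggingSink embeds a raft.SnapshotSink and logs whenever the sink
// is cancelled. Deferring the Printf means the message fires even if
// the wrapped Cancel panics or the method grows early returns.
type loggingSink struct {
    raft.SnapshotSink
    logger *log.Logger
}

func (s *loggingSink) Cancel() error {
    defer func() {
        s.logger.Printf("sink %s canceled", s.ID())
    }()
    return s.SnapshotSink.Cancel()
}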
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/snapshot/store_test.go new/rqlite-9.3.9/snapshot/store_test.go
--- old/rqlite-9.3.6/snapshot/store_test.go     2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/snapshot/store_test.go     2026-01-04 08:09:34.000000000 +0100
@@ -1,6 +1,7 @@
 package snapshot
 
 import (
+       "errors"
        "io"
        "os"
        "sort"
@@ -165,7 +166,8 @@
        }
 
        // Opening a snapshot should fail due to MRSW
-       if _, _, err := store.Open(sink.ID()); err != rsync.ErrMRSWConflict {
+       var expErr *rsync.ErrMRSWConflict
+       if _, _, err := store.Open(sink.ID()); !errors.As(err, &expErr) {
                t.Fatalf("wrong error returned: %v", err)
        }
 
@@ -256,7 +258,8 @@
                t.Fatalf("Failed to open snapshot: %v", err)
        }
        _, err = store.Create(1, 2, 3, makeTestConfiguration("1", "localhost:1"), 1, nil)
-       if err != rsync.ErrMRSWConflict {
+       var expErr *rsync.ErrMRSWConflict
+       if !errors.As(err, &expErr) {
                t.Fatalf("Expected MRSW conflict, got %v", err)
        }
        rc.Close()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/store/fsm.go new/rqlite-9.3.9/store/fsm.go
--- old/rqlite-9.3.6/store/fsm.go       2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/store/fsm.go       2026-01-04 08:09:34.000000000 +0100
@@ -4,11 +4,14 @@
        "expvar"
        "io"
        "log"
+       "os"
        "time"
 
        "github.com/hashicorp/raft"
 )
 
+var fsmSnapshotErrLogger = log.New(os.Stderr, "[fsm-snapshot] ", log.LstdFlags)
+
 // FSM is a wrapper around the Store which implements raft.FSM.
 type FSM struct {
        s *Store
@@ -59,12 +62,10 @@
                        }
                } else {
                        stats.Add(numSnapshotPersistsFailed, 1)
-                       if f.logger != nil {
-                               f.logger.Printf("failed to persist snapshot %s: %v", sink.ID(), retError)
-                       }
                }
        }()
        if err := f.FSMSnapshot.Persist(sink); err != nil {
+               fsmSnapshotErrLogger.Printf("failed to persist snapshot %s: %v", sink.ID(), err)
                return err
        }
        if f.Finalizer != nil {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/store/state.go new/rqlite-9.3.9/store/state.go
--- old/rqlite-9.3.6/store/state.go     2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/store/state.go     2026-01-04 08:09:34.000000000 +0100
@@ -210,9 +210,13 @@
 
        // Create a new snapshot, placing the configuration in as if it was
        // committed at index 1.
-       if err := db.Checkpoint(sql.CheckpointTruncate); err != nil {
+       meta, err := db.Checkpoint(sql.CheckpointTruncate)
+       if err != nil {
                return fmt.Errorf("failed to checkpoint database: %s", err)
        }
+       if !meta.Success() {
+               return fmt.Errorf("database checkpoint was not successful: %s", 
meta.String())
+       }
        tmpDBFD, err := os.Open(tmpDBPath)
        if err != nil {
                return fmt.Errorf("failed to open temporary database file: %s", 
err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rqlite-9.3.6/store/store.go new/rqlite-9.3.9/store/store.go
--- old/rqlite-9.3.6/store/store.go     2025-12-23 16:05:26.000000000 +0100
+++ new/rqlite-9.3.9/store/store.go     2026-01-04 08:09:34.000000000 +0100
@@ -133,6 +133,8 @@
        backupCASRetryDelay       = 100 * time.Millisecond
        connectionPoolCount       = 5
        connectionTimeout         = 10 * time.Second
+       mustWALCheckpointDelay    = 50 * time.Millisecond
+       mustWALCheckpointTimeout  = 5 * time.Minute
        raftLogCacheSize          = 512
        trailingScale             = 1.25
        observerChanLen           = 50
@@ -154,6 +156,8 @@
        numSnapshotsIncremental           = "num_snapshots_incremental"
        numFullCheckpointFailed           = "num_full_checkpoint_failed"
        numWALCheckpointTruncateFailed    = "num_wal_checkpoint_truncate_failed"
+       numWALCheckpointIncomplete        = "num_wal_checkpoint_incomplete"
+       numWALMustCheckpoint              = "num_wal_must_checkpoint"
        numAutoVacuums                    = "num_auto_vacuums"
        numAutoVacuumsFailed              = "num_auto_vacuums_failed"
        autoVacuumDuration                = "auto_vacuum_duration"
@@ -221,6 +225,8 @@
        stats.Add(numSnapshotsIncremental, 0)
        stats.Add(numFullCheckpointFailed, 0)
        stats.Add(numWALCheckpointTruncateFailed, 0)
+       stats.Add(numWALCheckpointIncomplete, 0)
+       stats.Add(numWALMustCheckpoint, 0)
        stats.Add(numAutoVacuums, 0)
        stats.Add(numAutoVacuumsFailed, 0)
        stats.Add(autoVacuumDuration, 0)
@@ -349,6 +355,10 @@
        fsmTerm       atomic.Uint64
        fsmUpdateTime *rsync.AtomicTime // This is node-local time.
 
+       // readerMu allows blocking of all reads. This is used to handle
+       // specific, very rare edge cases around WAL checkpointing.
+       readerMu sync.RWMutex
+
        // appendedAtTime is the Leader's clock time when that Leader appended 
the log entry.
        // The Leader that actually appended the log entry is not necessarily 
the current Leader.
        appendedAtTime *rsync.AtomicTime
@@ -1419,6 +1429,9 @@
 // upgraded to STRONG if the Store determines that is necessary to guarantee
 // a linearizable read.
 func (s *Store) Query(qr *proto.QueryRequest) (rows []*proto.QueryRows, level 
proto.ConsistencyLevel, raftIndex uint64, retErr error) {
+       s.readerMu.RLock()
+       defer s.readerMu.RUnlock()
+
        p := (*PragmaCheckRequest)(qr.Request)
        if err := p.Check(); err != nil {
                return nil, 0, 0, err
@@ -1530,6 +1543,9 @@
 
 // Request processes a request that may contain both Executes and Queries.
 func (s *Store) Request(eqr *proto.ExecuteQueryRequest) 
([]*proto.ExecuteQueryResponse, uint64, uint64, error) {
+       s.readerMu.RLock()
+       defer s.readerMu.RUnlock()
+
        p := (*PragmaCheckRequest)(eqr.Request)
        if err := p.Check(); err != nil {
                return nil, 0, 0, err
@@ -1633,6 +1649,9 @@
 // will be written directly to that file. Otherwise a temporary file will be 
created,
 // and that temporary file copied to dst.
 func (s *Store) Backup(br *proto.BackupRequest, dst io.Writer) (retErr error) {
+       s.readerMu.RLock()
+       defer s.readerMu.RUnlock()
+
        if !s.open.Is() {
                return ErrNotOpen
        }
@@ -1906,6 +1925,9 @@
 // http://sqlite.org/howtocorrupt.html states it is safe to do this
 // as long as the database is not written to during the call.
 func (s *Store) Database(leader bool) ([]byte, error) {
+       s.readerMu.RLock()
+       defer s.readerMu.RUnlock()
+
        if leader && s.raft.State() != raft.Leader {
                return nil, ErrNotLeader
        }
@@ -2506,9 +2528,21 @@
        var fsmSnapshot raft.FSMSnapshot
        if fullNeeded {
                chkStartTime := time.Now()
-               if err := s.db.Checkpoint(sql.CheckpointTruncate); err != nil {
+               meta, err := s.db.Checkpoint(sql.CheckpointTruncate)
+               if err != nil {
                        stats.Add(numFullCheckpointFailed, 1)
-                       return nil, err
+                       return nil, fmt.Errorf("snapshot can't complete due to 
FULL Snapshot checkpoint error (will retry): %s",
+                               err.Error())
+               }
+               if !meta.Success() {
+                       if meta.Moved < meta.Pages {
+                               stats.Add(numWALCheckpointIncomplete, 1)
+                       return nil, fmt.Errorf("snapshot can't complete due to FULL Snapshot checkpoint incomplete (will retry): %s",
+                                       meta.String())
+                       }
+                       s.logger.Printf("full Snapshot checkpoint moved %d/%d 
pages, but did not truncate WAL, forcing truncate",
+                               meta.Moved, meta.Pages)
+                       s.mustTruncateCheckpoint()
                }
                
stats.Get(snapshotCreateChkTruncateDuration).(*expvar.Int).Set(time.Since(chkStartTime).Milliseconds())
                dbFD, err := os.Open(s.db.Path())
@@ -2567,11 +2601,22 @@
                                return nil, err
                        }
                        chkTStartTime := time.Now()
-                       if err := s.db.Checkpoint(sql.CheckpointTruncate); err 
!= nil {
+                       meta, err := s.db.Checkpoint(sql.CheckpointTruncate)
+                       if err != nil {
                                stats.Add(numWALCheckpointTruncateFailed, 1)
-                               return nil, fmt.Errorf("snapshot can't complete 
due to WAL checkpoint failure (will retry): %s",
+                               return nil, fmt.Errorf("snapshot can't complete 
due to WAL checkpoint error (will retry): %s",
                                        err.Error())
                        }
+                       if !meta.Success() {
+                               if meta.Moved < meta.Pages {
+                                       stats.Add(numWALCheckpointIncomplete, 1)
+                                       return nil, fmt.Errorf("snapshot can't complete due to Snapshot checkpoint incomplete (will retry): %s",
+                                               meta.String())
+                               }
+                               s.logger.Printf("incremental Snapshot 
checkpoint moved %d/%d pages, but did not truncate WAL, forcing truncate",
+                                       meta.Moved, meta.Pages)
+                               s.mustTruncateCheckpoint()
+                       }
                        
stats.Get(snapshotCreateChkTruncateDuration).(*expvar.Int).Set(time.Since(chkTStartTime).Milliseconds())
                        
stats.Get(snapshotPrecompactWALSize).(*expvar.Int).Set(walSzPre)
                        stats.Get(snapshotWALSize).(*expvar.Int).Set(walSzPost)
@@ -2784,7 +2829,7 @@
        defer func() {
                if retError != nil && retError != ErrNothingNewToSnapshot {
                        stats.Add(numUserSnapshotsFailed, 1)
-                       s.logger.Printf("failed to generate user-requested 
snapshot: %s", retError.Error())
+                       s.logger.Printf("failed to generate application-level 
snapshot: %s", retError.Error())
                }
        }()
 
@@ -2850,6 +2895,42 @@
        return closeCh, doneCh
 }
 
+// mustTruncateCheckpoint truncates the checkpointed WAL, retrying until
+// successful or timing out.
+//
+// This should be called if we hit a specific edge case where all pages were
+// moved but some reader blocked truncation. The next write could start
+// overwriting WAL frames at the start of the WAL, which would mean losing WAL
+// data, so we must forcibly truncate here. We do this by blocking all readers
+// (writes are already blocked). This handling is based on research into
+// SQLite behavior; the condition has not yet been observed in practice.
+//
+// Finally, we could still panic here if we time out trying to truncate. This
+// could happen if a reader external to rqlite just won't let go.
+func (s *Store) mustTruncateCheckpoint() {
+       startT := time.Now()
+       defer func() {
+               s.logger.Printf("forced WAL truncate checkpoint took %s", 
time.Since(startT))
+       }()
+
+       stats.Add(numWALMustCheckpoint, 1)
+       s.readerMu.Lock()
+       defer s.readerMu.Unlock()
+
+       ticker := time.NewTicker(mustWALCheckpointDelay)
+       defer ticker.Stop()
+       // Create the timeout channel once, before the loop. A time.After
+       // inside the select would be re-created on every ticker fire (every
+       // mustWALCheckpointDelay), so the timeout would never actually expire.
+       timeout := time.After(mustWALCheckpointTimeout)
+       for {
+               select {
+               case <-ticker.C:
+                       meta, err := s.db.Checkpoint(sql.CheckpointTruncate)
+                       if err == nil && meta.Success() {
+                               return
+                       }
+               case <-timeout:
+                       panic("timed out trying to truncate checkpointed WAL")
+               }
+       }
+}
+
 // selfLeaderChange is called when this node detects that its leadership
 // status has changed.
 func (s *Store) selfLeaderChange(leader bool) {
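Taken together, the store.go changes form a reader-gate pattern: Query,
Request, Backup, and Database take readerMu.RLock, while mustTruncateCheckpoint
takes the full write lock to drain all readers before retrying the checkpoint.
The sketch below shows just that pattern under stated assumptions: the gate
type, its fake checkpoint() (fails twice, then succeeds), and the timings are
all illustrative, not rqlite's code.

    package main

    import (
        "errors"
        "fmt"
        "sync"
        "time"
    )

    type gate struct {
        mu       sync.RWMutex
        attempts int
    }

    func (g *gate) query(id int) {
        g.mu.RLock() // readers proceed concurrently with each other
        defer g.mu.RUnlock()
        fmt.Println("query", id, "running")
        time.Sleep(10 * time.Millisecond)
    }

    // checkpoint stands in for db.Checkpoint: fail twice, then succeed.
    func (g *gate) checkpoint() error {
        g.attempts++
        if g.attempts < 3 {
            return errors.New("reader blocked truncation")
        }
        return nil
    }

    func (g *gate) mustTruncate() {
        g.mu.Lock() // drains existing readers and blocks new ones
        defer g.mu.Unlock()

        ticker := time.NewTicker(5 * time.Millisecond)
        defer ticker.Stop()
        timeout := time.After(time.Second) // created once, not per iteration
        for {
            select {
            case <-ticker.C:
                if g.checkpoint() == nil {
                    fmt.Println("truncate succeeded after", g.attempts, "attempts")
                    return
                }
            case <-timeout:
                panic("timed out forcing WAL truncate")
            }
        }
    }

    func main() {
        g := &gate{}
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(i int) { defer wg.Done(); g.query(i) }(i)
        }
        time.Sleep(time.Millisecond) // give readers a chance to start
        g.mustTruncate()
        wg.Wait()
    }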
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/rqlite-9.3.6/store/store_test.go 
new/rqlite-9.3.9/store/store_test.go
--- old/rqlite-9.3.6/store/store_test.go        2025-12-23 16:05:26.000000000 
+0100
+++ new/rqlite-9.3.9/store/store_test.go        2026-01-04 08:09:34.000000000 
+0100
@@ -2,6 +2,7 @@
 
 import (
        "bytes"
+       "context"
        "crypto/rand"
        "errors"
        "fmt"
@@ -2652,6 +2653,163 @@
        }
 }
 
+func Test_SingleNode_SnapshotFail_Blocked(t *testing.T) {
+       s, ln := mustNewStore(t)
+       defer ln.Close()
+
+       s.SnapshotThreshold = 8192
+       s.SnapshotInterval = time.Hour
+       s.NoSnapshotOnClose = true
+       if err := s.Open(); err != nil {
+               t.Fatalf("failed to open single-node store: %s", err.Error())
+       }
+       defer s.Close(true)
+       if err := s.Bootstrap(NewServer(s.ID(), s.Addr(), true)); err != nil {
+               t.Fatalf("failed to bootstrap single-node store: %s", 
err.Error())
+       }
+       if _, err := s.WaitForLeader(10 * time.Second); err != nil {
+               t.Fatalf("Error waiting for leader: %s", err)
+       }
+       er := executeRequestFromString(`CREATE TABLE foo (id INTEGER NOT NULL 
PRIMARY KEY, name TEXT)`,
+               false, false)
+       _, _, err := s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("fiona")`, 
false, false)
+       _, _, err = s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       go func() {
+               qr := queryRequestFromString("SELECT * FROM foo", false, false)
+               qr.GetRequest().Statements[0].ForceStall = true
+               s.Query(qr)
+       }()
+
+       time.Sleep(2 * time.Second)
+       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("bob")`, 
false, false)
+       _, _, err = s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       if err := s.Snapshot(0); err == nil {
+               t.Fatalf("expected error snapshotting single-node store with 
stalled query")
+       }
+}
+
+// Test_SingleNode_SnapshotFail_Blocked_Retry tests that a snapshot operation
+// that requires a forced checkpoint and truncation does succeed once the
+// blocking query unblocks.
+func Test_SingleNode_SnapshotFail_Blocked_Retry(t *testing.T) {
+       s, ln := mustNewStore(t)
+       defer ln.Close()
+
+       s.SnapshotThreshold = 8192
+       s.SnapshotInterval = time.Hour
+       s.NoSnapshotOnClose = true
+       if err := s.Open(); err != nil {
+               t.Fatalf("failed to open single-node store: %s", err.Error())
+       }
+       defer s.Close(true)
+       if err := s.Bootstrap(NewServer(s.ID(), s.Addr(), true)); err != nil {
+               t.Fatalf("failed to bootstrap single-node store: %s", 
err.Error())
+       }
+       if _, err := s.WaitForLeader(10 * time.Second); err != nil {
+               t.Fatalf("Error waiting for leader: %s", err)
+       }
+       er := executeRequestFromString(`CREATE TABLE foo (id INTEGER NOT NULL 
PRIMARY KEY, name TEXT)`,
+               false, false)
+       _, _, err := s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("fiona")`, 
false, false)
+       _, _, err = s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       ctx, cancelFunc := context.WithCancel(context.Background())
+       go func() {
+               qr := queryRequestFromString("SELECT * FROM foo", false, false)
+               qr.GetRequest().Statements[0].ForceStall = true
+
+               blockingDB, err := db.Open(s.dbPath, false, true)
+               if err != nil {
+                       t.Errorf("failed to open blocking DB connection: %s", 
err.Error())
+               }
+               defer blockingDB.Close()
+
+               _, err = blockingDB.QueryWithContext(ctx, qr.GetRequest(), 
false)
+               if err != nil {
+                       t.Errorf("failed to execute stalled query on blocking 
DB connection: %s", err.Error())
+               }
+       }()
+       time.Sleep(1 * time.Second)
+
+       success := false
+       var wg sync.WaitGroup
+       wg.Go(func() {
+               if err := s.Snapshot(0); err != nil {
+                       t.Errorf("failed to snapshot single-node store with 
released stalled query: %s", err.Error())
+               } else {
+                       success = true
+               }
+       })
+       time.Sleep(1 * time.Second)
+       cancelFunc()
+       wg.Wait()
+       if !success {
+               t.Fatalf("expected snapshot to succeed after blocking query 
released")
+       }
+
+       // Again, this time with a persistent snapshot.
+       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("fiona")`, 
false, false)
+       _, _, err = s.Execute(er)
+       if err != nil {
+               t.Fatalf("failed to execute on single node: %s", err.Error())
+       }
+
+       ctx, cancelFunc = context.WithCancel(context.Background())
+       go func() {
+               qr := queryRequestFromString("SELECT * FROM foo", false, false)
+               qr.GetRequest().Statements[0].ForceStall = true
+
+               blockingDB, err := db.Open(s.dbPath, false, true)
+               if err != nil {
+                       t.Errorf("failed to open blocking DB connection: %s", 
err.Error())
+               }
+               defer blockingDB.Close()
+
+               _, err = blockingDB.QueryWithContext(ctx, qr.GetRequest(), 
false)
+               if err != nil {
+                       t.Errorf("failed to execute stalled query on blocking 
DB connection: %s", err.Error())
+               }
+       }()
+       time.Sleep(1 * time.Second)
+
+       success = false
+       var wg2 sync.WaitGroup
+       wg2.Go(func() {
+               if err := s.Snapshot(0); err != nil {
+                       t.Errorf("failed to snapshot single-node store with 
second released stalled query: %s", err.Error())
+               } else {
+                       success = true
+               }
+       })
+       time.Sleep(1 * time.Second)
+       cancelFunc()
+       wg2.Wait()
+       if !success {
+               t.Fatalf("expected snapshot to succeed after blocking query 
released")
+       }
+}
+
 func Test_OpenStoreSingleNode_OptimizeTimes(t *testing.T) {
        s0, ln0 := mustNewStore(t)
        defer s0.Close(true)

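The new Test_SingleNode_SnapshotFail_Blocked_Retry above relies on a
cancel-to-unblock pattern: a goroutine holds a stalled read until its context
is cancelled, and only then can the blocked snapshot complete. A minimal
sketch of that choreography follows; the stalled read and the blocked
"snapshot" are simulated with channels, and rqlite's ForceStall and
QueryWithContext internals are not reproduced.

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    // stalledRead blocks until its context is cancelled, standing in for a
    // reader that pins the WAL and prevents a TRUNCATE checkpoint.
    func stalledRead(ctx context.Context, released chan<- struct{}) {
        <-ctx.Done()
        close(released)
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        released := make(chan struct{})
        go stalledRead(ctx, released)

        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            <-released // stands in for s.Snapshot(0) waiting on the reader
            fmt.Println("snapshot completed after reader released")
        }()

        time.Sleep(100 * time.Millisecond) // let the snapshot block first
        cancel()                           // release the stalled reader
        wg.Wait()
    }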
++++++ vendor.tar.xz ++++++
++++ 11352 lines of diff (skipped)
