Hi ChangAo,

Thanks for the review!

I've attached v2, which adds a shmem_cleanup callback to
IoMethodOps, registered in AioShmemInit().

Like you, I'm not sure which approach is preferable, since
the new callback currently has only one implementor. If no
other IO method is expected to need cleanup, the simpler
v1 approach may be the better choice. Happy to hear your thoughts.

I've also added this patch to the commitfest app:
https://commitfest.postgresql.org/patch/6617/

Regards,
Lucas.
From 86fdc2a3daae35fc0db34af2e1a5c8702fe09797 Mon Sep 17 00:00:00 2001
From: Lucas DRAESCHER <[email protected]>
Date: Tue, 17 Mar 2026 17:26:11 +0100
Subject: [PATCH v2] Release io_uring resources on shmem exit

io_uring_queue_init() allocates resources for each io_uring
instance, but pgaio_uring_shmem_init() never registers a
cleanup callback to free them.

Add a shmem_cleanup callback to IoMethodOps registered in
AioShmemInit().

Implement shmem_cleanup for method_io_uring.c as
pgaio_uring_shmem_cleanup(), which calls
io_uring_queue_exit().
---
 src/backend/storage/aio/aio_init.c        | 18 ++++++++++++++++++
 src/backend/storage/aio/method_io_uring.c | 18 ++++++++++++++++++
 src/include/storage/aio_internal.h        |  6 ++++++
 3 files changed, 42 insertions(+)

diff --git a/src/backend/storage/aio/aio_init.c b/src/backend/storage/aio/aio_init.c
index d3c68d8b04c..abd09faaa2c 100644
--- a/src/backend/storage/aio/aio_init.c
+++ b/src/backend/storage/aio/aio_init.c
@@ -145,6 +145,20 @@ AioShmemSize(void)
 	return sz;
 }
 
+/*
+ * Wrapper around pgaio_method_ops->shmem_cleanup to satisfy the
+ * on_shmem_exit() callback signature.
+ */
+static void
+pgaio_shmem_cleanup(int code, Datum arg)
+{
+	/*
+	 * No null check needed here; AioShmemInit only registers this callback
+	 * when shmem_cleanup is non-null.
+	 */
+	pgaio_method_ops->shmem_cleanup();
+}
+
 void
 AioShmemInit(void)
 {
@@ -212,6 +226,10 @@ out:
 	/* Initialize IO method specific resources. */
 	if (pgaio_method_ops->shmem_init)
 		pgaio_method_ops->shmem_init(!found);
+
+	/* Register callback to release any resources allocated above. */
+	if (pgaio_method_ops->shmem_cleanup)
+		on_shmem_exit(pgaio_shmem_cleanup, 0);
 }
 
 void
diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c
index 4867ded35ea..4dfe3cb02eb 100644
--- a/src/backend/storage/aio/method_io_uring.c
+++ b/src/backend/storage/aio/method_io_uring.c
@@ -51,6 +51,7 @@
 /* Entry points for IoMethodOps. */
 static size_t pgaio_uring_shmem_size(void);
 static void pgaio_uring_shmem_init(bool first_time);
+static void pgaio_uring_shmem_cleanup(void);
 static void pgaio_uring_init_backend(void);
 static int	pgaio_uring_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
 static void pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation);
@@ -71,6 +72,7 @@ const IoMethodOps pgaio_uring_ops = {
 
 	.shmem_size = pgaio_uring_shmem_size,
 	.shmem_init = pgaio_uring_shmem_init,
+	.shmem_cleanup = pgaio_uring_shmem_cleanup,
 	.init_backend = pgaio_uring_init_backend,
 
 	.submit = pgaio_uring_submit,
@@ -395,6 +397,22 @@ pgaio_uring_shmem_init(bool first_time)
 	}
 }
 
+static void
+pgaio_uring_shmem_cleanup(void)
+{
+	if (pgaio_uring_contexts != NULL)
+	{
+		int			TotalProcs = pgaio_uring_procs();
+
+		elog(DEBUG1, "cleaning up %d io_uring processes", TotalProcs);
+
+		for (int i = 0; i < TotalProcs; i++)
+			io_uring_queue_exit(&pgaio_uring_contexts[i].io_uring_ring);
+
+		pgaio_uring_contexts = NULL;
+	}
+}
+
 static void
 pgaio_uring_init_backend(void)
 {
diff --git a/src/include/storage/aio_internal.h b/src/include/storage/aio_internal.h
index 5feea15be9e..fdc5dcced1f 100644
--- a/src/include/storage/aio_internal.h
+++ b/src/include/storage/aio_internal.h
@@ -282,6 +282,12 @@ typedef struct IoMethodOps
 	 */
 	void		(*shmem_init) (bool first_time);
 
+	/*
+	 * Clean up shared memory resources before shutdown. Called during shmem
+	 * exit. Optional.
+	 */
+	void		(*shmem_cleanup) (void);
+
 	/*
 	 * Per-backend initialization. Optional.
 	 */
-- 
2.53.0

Reply via email to