David Rowley писал(а) 2024-09-24 01:07:
On Tue, 24 Sept 2024 at 02:47, Vladlen Popolitov
<v.popoli...@postgrespro.ru> wrote:
I agree, it is better to fix them all together. I also do not like this
hack; it will be removed from the patch once I check and change
all <work_mem_vars> at once.
I think it will take about 1 week to fix and test all the changes. I will
estimate the total volume of the changes and think about how to group them
in the patch (I hope it will be only one patch).

There's a few places that do this:

Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;

/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
while (16 * maxBlockSize > work_mem * 1024L)

I think since maxBlockSize is a Size variable, that the above should
probably be:

while (16 * maxBlockSize > (Size) work_mem * 1024)

Maybe there can be a precursor patch to fix all those to get rid of
the 'L' and cast to the type we're comparing to or assigning to rather
than trying to keep the result of the multiplication as a long.

Hi

I rechecked all <work_mem_vars> that depend on the MAX_KILOBYTES limit and fixed
all casts that are affected by the 4-byte "long" type on 64-bit Windows. Now
the following variables are limited to 2TB on all 64-bit systems:
maintenance_work_mem
work_mem
logical_decoding_work_mem
max_stack_depth
autovacuum_work_mem
gin_pending_list_limit
wal_skip_threshold
Also, wal_keep_size_mb, min_wal_size_mb, max_wal_size_mb, and
max_slot_wal_keep_size_mb are not affected by the "long" cast.

From 6d275cb66cb39b1f209a6b43db2ce377ec0d7ba8 Mon Sep 17 00:00:00 2001
From: Vladlen Popolitov <v.popoli...@postgrespro.ru>
Date: Tue, 1 Oct 2024 00:10:37 +0300
Subject: [PATCH v2] work_mem_vars limit increased in 64bit Windows

---
 src/backend/access/gin/ginfast.c                |  2 +-
 src/backend/access/gin/ginget.c                 |  2 +-
 src/backend/access/hash/hash.c                  |  2 +-
 src/backend/access/heap/vacuumlazy.c            |  2 +-
 src/backend/access/nbtree/nbtpage.c             |  2 +-
 src/backend/commands/vacuumparallel.c           |  2 +-
 src/backend/executor/execUtils.c                |  2 +-
 src/backend/executor/nodeBitmapIndexscan.c      |  2 +-
 src/backend/executor/nodeBitmapOr.c             |  2 +-
 src/backend/nodes/tidbitmap.c                   |  6 +++---
 src/backend/optimizer/path/costsize.c           | 12 ++++++------
 src/backend/replication/logical/reorderbuffer.c |  6 +++---
 src/backend/tcop/postgres.c                     |  4 ++--
 src/backend/utils/sort/tuplestore.c             |  2 +-
 src/include/nodes/tidbitmap.h                   |  2 +-
 src/include/utils/guc.h                         |  2 +-
 16 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index eeca3ed318..d707e459bb 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -456,7 +456,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	 * ginInsertCleanup() should not be called inside our CRIT_SECTION.
 	 */
 	cleanupSize = GinGetPendingListCleanupSize(index);
-	if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L)
+	if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size)1024L)
 		needCleanup = true;
 
 	UnlockReleaseBuffer(metabuffer);
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 0b4f2ebadb..1f295d5907 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -125,7 +125,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 	Form_pg_attribute attr;
 
 	/* Initialize empty bitmap result */
-	scanEntry->matchBitmap = tbm_create(work_mem * 1024L, NULL);
+	scanEntry->matchBitmap = tbm_create(work_mem * INT64CONST(1024), NULL);
 
 	/* Null query cannot partial-match anything */
 	if (scanEntry->isPartialMatch &&
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 5ce3609394..0063902021 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -120,7 +120,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	double		reltuples;
 	double		allvisfrac;
 	uint32		num_buckets;
-	long		sort_threshold;
+	uint64		sort_threshold;
 	HashBuildState buildstate;
 
 	/*
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index d82aa3d489..eb658f66f1 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2883,7 +2883,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 	 */
 
 	dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
-	dead_items_info->max_bytes = vac_work_mem * 1024L;
+	dead_items_info->max_bytes = vac_work_mem * (size_t)1024;
 	dead_items_info->num_items = 0;
 	vacrel->dead_items_info = dead_items_info;
 
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 01bbece6bf..4ab3f6129f 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -2969,7 +2969,7 @@ _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
 	 * int overflow here.
 	 */
 	vstate->bufsize = 256;
-	maxbufsize = (work_mem * 1024L) / sizeof(BTPendingFSM);
+	maxbufsize = (work_mem * INT64CONST(1024)) / sizeof(BTPendingFSM);
 	maxbufsize = Min(maxbufsize, INT_MAX);
 	maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
 	/* Stay sane with small work_mem */
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 4fd6574e12..c4312e314c 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -375,7 +375,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 		(nindexes_mwm > 0) ?
 		maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
 		maintenance_work_mem;
-	shared->dead_items_info.max_bytes = vac_work_mem * 1024L;
+	shared->dead_items_info.max_bytes = vac_work_mem * (size_t)1024;
 
 	/* Prepare DSA space for dead items */
 	dead_items = TidStoreCreateShared(shared->dead_items_info.max_bytes,
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 5737f9f4eb..82e3662880 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -323,7 +323,7 @@ CreateWorkExprContext(EState *estate)
 	Size		maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
 
 	/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
-	while (16 * maxBlockSize > work_mem * 1024L)
+	while (16 * maxBlockSize > work_mem * (Size)1024L)
 		maxBlockSize >>= 1;
 
 	if (maxBlockSize < ALLOCSET_DEFAULT_INITSIZE)
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 6df8e17ec8..f6aba014db 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -91,7 +91,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
 	else
 	{
 		/* XXX should we use less than work_mem for this? */
-		tbm = tbm_create(work_mem * 1024L,
+		tbm = tbm_create(work_mem * INT64CONST(1024),
 						 ((BitmapIndexScan *) node->ss.ps.plan)->isshared ?
 						 node->ss.ps.state->es_query_dsa : NULL);
 	}
diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c
index 7029536c64..e189253d30 100644
--- a/src/backend/executor/nodeBitmapOr.c
+++ b/src/backend/executor/nodeBitmapOr.c
@@ -143,7 +143,7 @@ MultiExecBitmapOr(BitmapOrState *node)
 			if (result == NULL) /* first subplan */
 			{
 				/* XXX should we use less than work_mem for this? */
-				result = tbm_create(work_mem * 1024L,
+				result = tbm_create(work_mem * INT64CONST(1024),
 									((BitmapOr *) node->ps.plan)->isshared ?
 									node->ps.state->es_query_dsa : NULL);
 			}
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index e8ab5d78fc..c438188e2e 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -263,7 +263,7 @@ static int	tbm_shared_comparator(const void *left, const void *right,
  * be allocated from the DSA.
  */
 TIDBitmap *
-tbm_create(long maxbytes, dsa_area *dsa)
+tbm_create(double maxbytes, dsa_area *dsa)
 {
 	TIDBitmap  *tbm;
 
@@ -1541,7 +1541,7 @@ pagetable_free(pagetable_hash *pagetable, void *pointer)
 long
 tbm_calculate_entries(double maxbytes)
 {
-	long		nbuckets;
+	Size		nbuckets;
 
 	/*
 	 * Estimate number of hashtable entries we can have within maxbytes. This
@@ -1554,5 +1554,5 @@ tbm_calculate_entries(double maxbytes)
 	nbuckets = Min(nbuckets, INT_MAX - 1);	/* safety limit */
 	nbuckets = Max(nbuckets, 16);	/* sanity limit */
 
-	return nbuckets;
+	return (long)nbuckets;
 }
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e1523d15df..069095ef73 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1903,7 +1903,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 	double		input_bytes = relation_byte_size(tuples, width);
 	double		output_bytes;
 	double		output_tuples;
-	long		sort_mem_bytes = sort_mem * 1024L;
+	int64		sort_mem_bytes = sort_mem * INT64CONST(1024);
 
 	/*
 	 * We want to be sure the cost of a sort is never estimated as zero, even
@@ -2488,7 +2488,7 @@ cost_material(Path *path,
 	Cost		startup_cost = input_startup_cost;
 	Cost		run_cost = input_total_cost - input_startup_cost;
 	double		nbytes = relation_byte_size(tuples, width);
-	long		work_mem_bytes = work_mem * 1024L;
+	double		work_mem_bytes = work_mem * INT64CONST(1024);
 
 	path->rows = tuples;
 
@@ -3983,7 +3983,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 	else if (enable_material && innersortkeys != NIL &&
 			 relation_byte_size(inner_path_rows,
 								inner_path->pathtarget->width) >
-			 (work_mem * 1024L))
+			 (double)(work_mem * INT64CONST(1024)))
 		path->materialize_inner = true;
 	else
 		path->materialize_inner = false;
@@ -4618,7 +4618,7 @@ cost_rescan(PlannerInfo *root, Path *path,
 				Cost		run_cost = cpu_tuple_cost * path->rows;
 				double		nbytes = relation_byte_size(path->rows,
 														path->pathtarget->width);
-				long		work_mem_bytes = work_mem * 1024L;
+				double		work_mem_bytes = work_mem * INT64CONST(1024);
 
 				if (nbytes > work_mem_bytes)
 				{
@@ -4645,7 +4645,7 @@ cost_rescan(PlannerInfo *root, Path *path,
 				Cost		run_cost = cpu_operator_cost * path->rows;
 				double		nbytes = relation_byte_size(path->rows,
 														path->pathtarget->width);
-				long		work_mem_bytes = work_mem * 1024L;
+				double		work_mem_bytes = work_mem * INT64CONST(1024);
 
 				if (nbytes > work_mem_bytes)
 				{
@@ -6480,7 +6480,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
 	 * the bitmap at one time.)
 	 */
 	heap_pages = Min(pages_fetched, baserel->pages);
-	maxentries = tbm_calculate_entries(work_mem * 1024L);
+	maxentries = tbm_calculate_entries(work_mem * INT64CONST(1024));
 
 	if (loop_count > 1)
 	{
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 22bcf171ff..fe87872eb5 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -3638,7 +3638,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
 	 * haven't exceeded the memory limit.
 	 */
 	if (debug_logical_replication_streaming == DEBUG_LOGICAL_REP_STREAMING_BUFFERED &&
-		rb->size < logical_decoding_work_mem * 1024L)
+		rb->size < logical_decoding_work_mem * (Size)1024L)
 		return;
 
 	/*
@@ -3651,7 +3651,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
 	 * because a user can reduce the logical_decoding_work_mem to a smaller
 	 * value before the most recent change.
 	 */
-	while (rb->size >= logical_decoding_work_mem * 1024L ||
+	while (rb->size >= logical_decoding_work_mem * (Size)1024L ||
 		   (debug_logical_replication_streaming == DEBUG_LOGICAL_REP_STREAMING_IMMEDIATE &&
 			rb->size > 0))
 	{
@@ -3694,7 +3694,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
 	}
 
 	/* We must be under the memory limit now. */
-	Assert(rb->size < logical_decoding_work_mem * 1024L);
+	Assert(rb->size < logical_decoding_work_mem * (Size)1024L);
 
 }
 
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 7f5eada9d4..cc9e838d54 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -126,7 +126,7 @@ typedef struct BindParamCbData
  */
 
 /* max_stack_depth converted to bytes for speed of checking */
-static long max_stack_depth_bytes = 100 * 1024L;
+static int64 max_stack_depth_bytes = 100 * 1024L;
 
 /*
  * Stack base pointer -- initialized by PostmasterMain and inherited by
@@ -3627,7 +3627,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
 void
 assign_max_stack_depth(int newval, void *extra)
 {
-	long		newval_bytes = newval * 1024L;
+	int64		newval_bytes = newval * INT64CONST(1024);
 
 	max_stack_depth_bytes = newval_bytes;
 }
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index a720d70200..bbc6f3047f 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -265,7 +265,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
 	state->truncated = false;
 	state->usedDisk = false;
 	state->maxSpace = 0;
-	state->allowedMem = maxKBytes * 1024L;
+	state->allowedMem = maxKBytes * INT64CONST(1024);
 	state->availMem = state->allowedMem;
 	state->myfile = NULL;
 
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index 1945f0639b..bfde9701f7 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -48,7 +48,7 @@ typedef struct TBMIterateResult
 
 /* function prototypes in nodes/tidbitmap.c */
 
-extern TIDBitmap *tbm_create(long maxbytes, dsa_area *dsa);
+extern TIDBitmap *tbm_create(double maxbytes, dsa_area *dsa);
 extern void tbm_free(TIDBitmap *tbm);
 extern void tbm_free_shared_area(dsa_area *dsa, dsa_pointer dp);
 
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 840b0fe57f..7f716483aa 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -19,7 +19,7 @@
 
 /* upper limit for GUC variables measured in kilobytes of memory */
 /* note that various places assume the byte size fits in a "long" variable */
-#if SIZEOF_SIZE_T > 4 && SIZEOF_LONG > 4
+#if SIZEOF_SIZE_T > 4
 #define MAX_KILOBYTES	INT_MAX
 #else
 #define MAX_KILOBYTES	(INT_MAX / 1024)
-- 
2.42.0.windows.2

Reply via email to