On Fri, Feb 20, 2015 at 8:57 PM, Michael Paquier
<michael.paqu...@gmail.com> wrote:
> Attached is a new series. 0001 and 0002 are the same, 0003 and 0004
> cover the backend structures listed previously. I noticed as well that
> indexed_tlist in setrefs.c merits some attention.

And after all those commits, attached is a patch changing
HeapTupleHeaderData, using the following macro to track the size of
the structure:
#define SizeofHeapTupleHeader offsetof(HeapTupleHeaderData, t_bits)
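
For anyone skimming the idea: once t_bits is a flexible array member, the
fixed part of the header is measured with offsetof() rather than sizeof(),
since sizeof() can include trailing padding (and, with the old t_bits[1]
declaration, the dummy array itself). Below is a minimal standalone sketch
of the idiom with made-up names, not the actual backend definitions:

/*
 * Standalone sketch (not PostgreSQL code) of the flexible-array-member
 * sizing idiom.  The fixed prefix of the struct is offsetof(Header, bits),
 * which can be smaller than sizeof(Header) because of trailing padding.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Header
{
	int				nfields;	/* fixed part: 4 bytes */
	unsigned char	flags;		/* fixed part ends at 5 bytes */
	unsigned char	bits[];		/* flexible array member */
} Header;

/* same idiom as SizeofHeapTupleHeader */
#define SizeofHeader offsetof(Header, bits)

int
main(void)
{
	size_t		datalen = 16;
	Header	   *h = malloc(SizeofHeader + datalen);	/* header + payload */

	memset(h, 0, SizeofHeader);			/* zero only the fixed header */
	h->nfields = 3;
	/* payload starts right after the fixed header, as in heap_form_tuple */
	memset((char *) h + SizeofHeader, 0xFF, datalen);

	printf("sizeof(Header) = %zu, SizeofHeader = %zu\n",
		   sizeof(Header), (size_t) SizeofHeader);
	free(h);
	return 0;
}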

Regards,
-- 
Michael
From 3951d103d5bff49f300c89e34d2fa7bd893b9fd4 Mon Sep 17 00:00:00 2001
From: Michael Paquier <mich...@otacoo.com>
Date: Fri, 20 Feb 2015 16:42:28 +0900
Subject: [PATCH] Switch HeapTupleHeaderData and MinimalTupleData to use
 flexible arrays

This is some more hacking related to FLEXIBLE_ARRAY_MEMBER.
---
 contrib/file_fdw/file_fdw.c                     |  2 +-
 contrib/postgres_fdw/postgres_fdw.c             |  2 +-
 src/backend/access/common/heaptuple.c           |  2 +-
 src/backend/access/heap/heapam.c                | 56 ++++++++++++-------------
 src/backend/access/heap/tuptoaster.c            | 10 ++---
 src/backend/catalog/toasting.c                  |  2 +-
 src/backend/executor/nodeHash.c                 |  2 +-
 src/backend/optimizer/path/costsize.c           |  7 ++--
 src/backend/optimizer/plan/planner.c            |  6 ++-
 src/backend/optimizer/plan/subselect.c          |  9 ++--
 src/backend/optimizer/prep/prepunion.c          |  3 +-
 src/backend/optimizer/util/plancat.c            |  2 +-
 src/backend/replication/logical/decode.c        | 30 ++++++-------
 src/backend/replication/logical/reorderbuffer.c | 26 +++++-------
 src/backend/utils/adt/trigfuncs.c               |  6 +--
 src/include/access/htup_details.h               | 10 +++--
 src/include/access/tuptoaster.h                 |  2 +-
 src/include/replication/reorderbuffer.h         |  7 +++-
 18 files changed, 95 insertions(+), 89 deletions(-)

diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index d569760..4368897 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -932,7 +932,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
 		int			tuple_width;
 
 		tuple_width = MAXALIGN(baserel->width) +
-			MAXALIGN(sizeof(HeapTupleHeaderData));
+			MAXALIGN(SizeofHeapTupleHeader);
 		ntuples = clamp_row_est((double) stat_buf.st_size /
 								(double) tuple_width);
 	}
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index d76e739..8bb1c80 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -519,7 +519,7 @@ postgresGetForeignRelSize(PlannerInfo *root,
 		{
 			baserel->pages = 10;
 			baserel->tuples =
-				(10 * BLCKSZ) / (baserel->width + sizeof(HeapTupleHeaderData));
+				(10 * BLCKSZ) / (baserel->width + SizeofHeapTupleHeader);
 		}
 
 		/* Estimate baserel size as best we can with local statistics. */
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 867035d..66db9c6 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -694,7 +694,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
 	/*
 	 * Determine total space needed
 	 */
-	len = offsetof(HeapTupleHeaderData, t_bits);
+	len = SizeofHeapTupleHeader;
 
 	if (hasnull)
 		len += BITMAPLEN(numberOfAttributes);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 46060bc1..00f3b94 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2186,8 +2186,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 		XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
 		/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
 		XLogRegisterBufData(0,
-			(char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
-					 heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits));
+			(char *) heaptup->t_data + SizeofHeapTupleHeader,
+					 heaptup->t_len - SizeofHeapTupleHeader);
 
 		recptr = XLogInsert(RM_HEAP_ID, info);
 
@@ -2460,9 +2460,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 				tuphdr->t_hoff = heaptup->t_data->t_hoff;
 
 				/* write bitmap [+ padding] [+ oid] + data */
-				datalen = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
+				datalen = heaptup->t_len - SizeofHeapTupleHeader;
 				memcpy(scratchptr,
-					   (char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
+					   (char *) heaptup->t_data + SizeofHeapTupleHeader,
 					   datalen);
 				tuphdr->datalen = datalen;
 				scratchptr += datalen;
@@ -2904,9 +2904,9 @@ l1:
 
 			XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
 			XLogRegisterData((char *) old_key_tuple->t_data
-							 + offsetof(HeapTupleHeaderData, t_bits),
+							 + SizeofHeapTupleHeader,
 							 old_key_tuple->t_len
-							 - offsetof(HeapTupleHeaderData, t_bits));
+							 - SizeofHeapTupleHeader);
 		}
 
 		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
@@ -6732,7 +6732,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
 	xlhdr.t_infomask = newtup->t_data->t_infomask;
 	xlhdr.t_hoff = newtup->t_data->t_hoff;
-	Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len);
+	Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
 
 	/*
 	 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
@@ -6743,8 +6743,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	if (prefixlen == 0)
 	{
 		XLogRegisterBufData(0,
-		   ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
-		   newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -suffixlen);
+		   ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+		   newtup->t_len - SizeofHeapTupleHeader -suffixlen);
 	}
 	else
 	{
@@ -6753,11 +6753,11 @@ log_heap_update(Relation reln, Buffer oldbuf,
 		 * two separate rdata entries.
 		 */
 		/* bitmap [+ padding] [+ oid] */
-		if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) >0)
+		if (newtup->t_data->t_hoff - SizeofHeapTupleHeader >0)
 		{
 			XLogRegisterBufData(0,
-			((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
-			 newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits));
+			((char *) newtup->t_data) + SizeofHeapTupleHeader,
+			 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
 		}
 
 		/* data after common prefix */
@@ -6777,8 +6777,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 		XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
 
 		/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
-		XLogRegisterData((char *) old_key_tuple->t_data + offsetof(HeapTupleHeaderData, t_bits),
-			   old_key_tuple->t_len - offsetof(HeapTupleHeaderData, t_bits));
+		XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
+			   old_key_tuple->t_len - SizeofHeapTupleHeader);
 	}
 
 	recptr = XLogInsert(RM_HEAP_ID, info);
@@ -7351,7 +7351,7 @@ heap_xlog_insert(XLogReaderState *record)
 	xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
 	Buffer		buffer;
 	Page		page;
-	struct
+	union
 	{
 		HeapTupleHeaderData hdr;
 		char		data[MaxHeapTupleSize];
@@ -7415,12 +7415,12 @@ heap_xlog_insert(XLogReaderState *record)
 		data += SizeOfHeapHeader;
 
 		htup = &tbuf.hdr;
-		MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+		MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 		/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-		memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+		memcpy((char *) htup + SizeofHeapTupleHeader,
 			   data,
 			   newlen);
-		newlen += offsetof(HeapTupleHeaderData, t_bits);
+		newlen += SizeofHeapTupleHeader;
 		htup->t_infomask2 = xlhdr.t_infomask2;
 		htup->t_infomask = xlhdr.t_infomask;
 		htup->t_hoff = xlhdr.t_hoff;
@@ -7469,7 +7469,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
 	BlockNumber blkno;
 	Buffer		buffer;
 	Page		page;
-	struct
+	union
 	{
 		HeapTupleHeaderData hdr;
 		char		data[MaxHeapTupleSize];
@@ -7548,14 +7548,14 @@ heap_xlog_multi_insert(XLogReaderState *record)
 			newlen = xlhdr->datalen;
 			Assert(newlen <= MaxHeapTupleSize);
 			htup = &tbuf.hdr;
-			MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+			MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 			/* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-			memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+			memcpy((char *) htup + SizeofHeapTupleHeader,
 				   (char *) tupdata,
 				   newlen);
 			tupdata += newlen;
 
-			newlen += offsetof(HeapTupleHeaderData, t_bits);
+			newlen += SizeofHeapTupleHeader;
 			htup->t_infomask2 = xlhdr->t_infomask2;
 			htup->t_infomask = xlhdr->t_infomask;
 			htup->t_hoff = xlhdr->t_hoff;
@@ -7618,7 +7618,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 	uint16		prefixlen = 0,
 				suffixlen = 0;
 	char	   *newp;
-	struct
+	union
 	{
 		HeapTupleHeaderData hdr;
 		char		data[MaxHeapTupleSize];
@@ -7780,19 +7780,19 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 		Assert(tuplen <= MaxHeapTupleSize);
 
 		htup = &tbuf.hdr;
-		MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+		MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 
 		/*
 		 * Reconstruct the new tuple using the prefix and/or suffix from the
 		 * old tuple, and the data stored in the WAL record.
 		 */
-		newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
+		newp = (char *) htup + SizeofHeapTupleHeader;
 		if (prefixlen > 0)
 		{
 			int			len;
 
 			/* copy bitmap [+ padding] [+ oid] from WAL record */
-			len = xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
+			len = xlhdr.t_hoff - SizeofHeapTupleHeader;
 			memcpy(newp, recdata, len);
 			recdata += len;
 			newp += len;
@@ -7802,7 +7802,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 			newp += prefixlen;
 
 			/* copy new tuple data from WAL record */
-			len = tuplen - (xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
+			len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
 			memcpy(newp, recdata, len);
 			recdata += len;
 			newp += len;
@@ -7823,7 +7823,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 		if (suffixlen > 0)
 			memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
 
-		newlen = offsetof(HeapTupleHeaderData, t_bits) + tuplen + prefixlen + suffixlen;
+		newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
 		htup->t_infomask2 = xlhdr.t_infomask2;
 		htup->t_infomask = xlhdr.t_infomask;
 		htup->t_hoff = xlhdr.t_hoff;
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index deb3372..8464e87 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -677,7 +677,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 	 */
 
 	/* compute header overhead --- this should match heap_form_tuple() */
-	hoff = offsetof(HeapTupleHeaderData, t_bits);
+	hoff = SizeofHeapTupleHeader;
 	if (has_nulls)
 		hoff += BITMAPLEN(numAttrs);
 	if (newtup->t_data->t_infomask & HEAP_HASOID)
@@ -963,7 +963,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 		 * different conclusion about the size of the null bitmap, or even
 		 * whether there needs to be one at all.
 		 */
-		new_header_len = offsetof(HeapTupleHeaderData, t_bits);
+		new_header_len = SizeofHeapTupleHeader;
 		if (has_nulls)
 			new_header_len += BITMAPLEN(numAttrs);
 		if (olddata->t_infomask & HEAP_HASOID)
@@ -986,7 +986,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 		/*
 		 * Copy the existing tuple header, but adjust natts and t_hoff.
 		 */
-		memcpy(new_data, olddata, offsetof(HeapTupleHeaderData, t_bits));
+		memcpy(new_data, olddata, SizeofHeapTupleHeader);
 		HeapTupleHeaderSetNatts(new_data, numAttrs);
 		new_data->t_hoff = new_header_len;
 		if (olddata->t_infomask & HEAP_HASOID)
@@ -1196,7 +1196,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 	 *
 	 * This should match the reconstruction code in toast_insert_or_update.
 	 */
-	new_header_len = offsetof(HeapTupleHeaderData, t_bits);
+	new_header_len = SizeofHeapTupleHeader;
 	if (has_nulls)
 		new_header_len += BITMAPLEN(numAttrs);
 	if (tup->t_infomask & HEAP_HASOID)
@@ -1211,7 +1211,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 	/*
 	 * Copy the existing tuple header, but adjust natts and t_hoff.
 	 */
-	memcpy(new_data, tup, offsetof(HeapTupleHeaderData, t_bits));
+	memcpy(new_data, tup, SizeofHeapTupleHeader);
 	HeapTupleHeaderSetNatts(new_data, numAttrs);
 	new_data->t_hoff = new_header_len;
 	if (tup->t_infomask & HEAP_HASOID)
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index e73252c..a1efddb 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -447,7 +447,7 @@ needs_toast_table(Relation rel)
 		return false;			/* nothing to toast? */
 	if (maxlength_unknown)
 		return true;			/* any unlimited-length attrs? */
-	tuple_length = MAXALIGN(offsetof(HeapTupleHeaderData, t_bits) +
+	tuple_length = MAXALIGN(SizeofHeapTupleHeader +
 							BITMAPLEN(tupdesc->natts)) +
 		MAXALIGN(data_length);
 	return (tuple_length > TOAST_TUPLE_THRESHOLD);
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index abd70b3..4b922b9 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -439,7 +439,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	 * don't count palloc overhead either.
 	 */
 	tupsize = HJTUPLE_OVERHEAD +
-		MAXALIGN(sizeof(MinimalTupleData)) +
+		MAXALIGN(offsetof(MinimalTupleData, t_bits)) +
 		MAXALIGN(tupwidth);
 	inner_rel_bytes = ntuples * tupsize;
 
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 020558b..d24b4d8 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -4036,11 +4036,11 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 
 	/*
 	 * If we have a whole-row reference, estimate its width as the sum of
-	 * per-column widths plus sizeof(HeapTupleHeaderData).
+	 * per-column widths plus SizeofHeapTupleHeader.
 	 */
 	if (have_wholerow_var)
 	{
-		int32		wholerow_width = sizeof(HeapTupleHeaderData);
+		int32		wholerow_width = SizeofHeapTupleHeader;
 
 		if (reloid != InvalidOid)
 		{
@@ -4078,7 +4078,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 static double
 relation_byte_size(double tuples, int width)
 {
-	return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+	return tuples * (MAXALIGN(width) +
+					 MAXALIGN(SizeofHeapTupleHeader));
 }
 
 /*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 5c4884f..1e6680c 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -2755,7 +2755,8 @@ choose_hashed_grouping(PlannerInfo *root,
 	 */
 
 	/* Estimate per-hash-entry space at tuple width... */
-	hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(path_width) +
+		MAXALIGN(offsetof(MinimalTupleData, t_bits));
 	/* plus space for pass-by-ref transition values... */
 	hashentrysize += agg_costs->transitionSpace;
 	/* plus the per-hash-entry overhead */
@@ -2923,7 +2924,8 @@ choose_hashed_distinct(PlannerInfo *root,
 	 */
 
 	/* Estimate per-hash-entry space at tuple width... */
-	hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(path_width) +
+		MAXALIGN(offsetof(MinimalTupleData, t_bits));
 	/* plus the per-hash-entry overhead */
 	hashentrysize += hash_agg_entry_size(0);
 
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 78fb6b1..11c4dd9 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -974,12 +974,13 @@ subplan_is_hashable(Plan *plan)
 
 	/*
 	 * The estimated size of the subquery result must fit in work_mem. (Note:
-	 * we use sizeof(HeapTupleHeaderData) here even though the tuples will
-	 * actually be stored as MinimalTuples; this provides some fudge factor
-	 * for hashtable overhead.)
+	 * we use SizeofHeapTupleHeader here even though the tuples will actually
+	 * be stored as MinimalTuples; this provides some fudge factor for
+	 * hashtable overhead.)
 	 */
 	subquery_size = plan->plan_rows *
-		(MAXALIGN(plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+		(MAXALIGN(plan->plan_width) +
+		 MAXALIGN(SizeofHeapTupleHeader));
 	if (subquery_size > work_mem * 1024L)
 		return false;
 
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 05f601e..b290a08 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -832,7 +832,8 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 	 * Don't do it if it doesn't look like the hashtable will fit into
 	 * work_mem.
 	 */
-	hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
+	hashentrysize = MAXALIGN(input_plan->plan_width) +
+		MAXALIGN(offsetof(MinimalTupleData, t_bits));
 
 	if (hashentrysize * dNumGroups > work_mem * 1024L)
 		return false;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index fb7db6d..034b8dc 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -508,7 +508,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
 				int32		tuple_width;
 
 				tuple_width = get_rel_data_width(rel, attr_widths);
-				tuple_width += sizeof(HeapTupleHeaderData);
+				tuple_width += SizeofHeapTupleHeader;
 				tuple_width += sizeof(ItemIdData);
 				/* note: integer division is intentional here */
 				density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 77c02ba..31487ea 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -765,21 +765,21 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 			 * transactions.
 			 */
 			tuple->tuple.t_tableOid = InvalidOid;
-			tuple->tuple.t_data = &tuple->header;
+			tuple->tuple.t_data = &tuple->t_data.header;
 			tuple->tuple.t_len = datalen
-				+ offsetof(HeapTupleHeaderData, t_bits);
+				+ SizeofHeapTupleHeader;
 
-			memset(&tuple->header, 0, sizeof(HeapTupleHeaderData));
+			memset(&tuple->t_data.header, 0, SizeofHeapTupleHeader);
 
-			memcpy((char *) &tuple->header
-				   + offsetof(HeapTupleHeaderData, t_bits),
+			memcpy((char *) &tuple->t_data.header
+				   + SizeofHeapTupleHeader,
 				   (char *) data,
 				   datalen);
 			data += datalen;
 
-			tuple->header.t_infomask = xlhdr->t_infomask;
-			tuple->header.t_infomask2 = xlhdr->t_infomask2;
-			tuple->header.t_hoff = xlhdr->t_hoff;
+			tuple->t_data.header.t_infomask = xlhdr->t_infomask;
+			tuple->t_data.header.t_infomask2 = xlhdr->t_infomask2;
+			tuple->t_data.header.t_hoff = xlhdr->t_hoff;
 		}
 
 		/*
@@ -815,27 +815,27 @@ DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tuple)
 	Assert(datalen >= 0);
 	Assert(datalen <= MaxHeapTupleSize);
 
-	tuple->tuple.t_len = datalen + offsetof(HeapTupleHeaderData, t_bits);
+	tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
 
 	/* not a disk based tuple */
 	ItemPointerSetInvalid(&tuple->tuple.t_self);
 
 	/* we can only figure this out after reassembling the transactions */
 	tuple->tuple.t_tableOid = InvalidOid;
-	tuple->tuple.t_data = &tuple->header;
+	tuple->tuple.t_data = &tuple->t_data.header;
 
 	/* data is not stored aligned, copy to aligned storage */
 	memcpy((char *) &xlhdr,
 		   data,
 		   SizeOfHeapHeader);
 
-	memset(&tuple->header, 0, sizeof(HeapTupleHeaderData));
+	memset(&tuple->t_data.header, 0, SizeofHeapTupleHeader);
 
-	memcpy((char *) &tuple->header + offsetof(HeapTupleHeaderData, t_bits),
+	memcpy((char *) &tuple->t_data.header + SizeofHeapTupleHeader,
 		   data + SizeOfHeapHeader,
 		   datalen);
 
-	tuple->header.t_infomask = xlhdr.t_infomask;
-	tuple->header.t_infomask2 = xlhdr.t_infomask2;
-	tuple->header.t_hoff = xlhdr.t_hoff;
+	tuple->t_data.header.t_infomask = xlhdr.t_infomask;
+	tuple->t_data.header.t_infomask2 = xlhdr.t_infomask2;
+	tuple->t_data.header.t_hoff = xlhdr.t_hoff;
 }
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index bcd5896..3226405 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -2014,14 +2014,12 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 				newtup = change->data.tp.newtuple;
 
 				if (oldtup)
-					oldlen = offsetof(ReorderBufferTupleBuf, data)
-						+oldtup->tuple.t_len
-						- offsetof(HeapTupleHeaderData, t_bits);
+					oldlen = offsetof(ReorderBufferTupleBuf, t_data) +
+						oldtup->tuple.t_len;
 
 				if (newtup)
-					newlen = offsetof(ReorderBufferTupleBuf, data)
-						+newtup->tuple.t_len
-						- offsetof(HeapTupleHeaderData, t_bits);
+					newlen = offsetof(ReorderBufferTupleBuf, t_data) +
+						newtup->tuple.t_len;
 
 				sz += oldlen;
 				sz += newlen;
@@ -2262,27 +2260,25 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 		case REORDER_BUFFER_CHANGE_DELETE:
 			if (change->data.tp.newtuple)
 			{
-				Size		len = offsetof(ReorderBufferTupleBuf, data)
-				+((ReorderBufferTupleBuf *) data)->tuple.t_len
-				- offsetof(HeapTupleHeaderData, t_bits);
+				Size		len = offsetof(ReorderBufferTupleBuf, t_data) +
+					((ReorderBufferTupleBuf *) data)->tuple.t_len;
 
 				change->data.tp.newtuple = ReorderBufferGetTupleBuf(rb);
 				memcpy(change->data.tp.newtuple, data, len);
 				change->data.tp.newtuple->tuple.t_data =
-					&change->data.tp.newtuple->header;
+					&change->data.tp.newtuple->t_data.header;
 				data += len;
 			}
 
 			if (change->data.tp.oldtuple)
 			{
-				Size		len = offsetof(ReorderBufferTupleBuf, data)
-				+((ReorderBufferTupleBuf *) data)->tuple.t_len
-				- offsetof(HeapTupleHeaderData, t_bits);
+				Size		len = offsetof(ReorderBufferTupleBuf, t_data) +
+					((ReorderBufferTupleBuf *) data)->tuple.t_len;
 
 				change->data.tp.oldtuple = ReorderBufferGetTupleBuf(rb);
 				memcpy(change->data.tp.oldtuple, data, len);
 				change->data.tp.oldtuple->tuple.t_data =
-					&change->data.tp.oldtuple->header;
+					&change->data.tp.oldtuple->t_data.header;
 				data += len;
 			}
 			break;
@@ -2660,7 +2656,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 	 */
 	tmphtup = heap_form_tuple(desc, attrs, isnull);
 	Assert(newtup->tuple.t_len <= MaxHeapTupleSize);
-	Assert(&newtup->header == newtup->tuple.t_data);
+	Assert(&newtup->t_data.header == newtup->tuple.t_data);
 
 	memcpy(newtup->tuple.t_data, tmphtup->t_data, tmphtup->t_len);
 	newtup->tuple.t_len = tmphtup->t_len;
diff --git a/src/backend/utils/adt/trigfuncs.c b/src/backend/utils/adt/trigfuncs.c
index fb79092..a8a75ef 100644
--- a/src/backend/utils/adt/trigfuncs.c
+++ b/src/backend/utils/adt/trigfuncs.c
@@ -84,9 +84,9 @@ suppress_redundant_updates_trigger(PG_FUNCTION_ARGS)
 		 HeapTupleHeaderGetNatts(oldheader)) &&
 		((newheader->t_infomask & ~HEAP_XACT_MASK) ==
 		 (oldheader->t_infomask & ~HEAP_XACT_MASK)) &&
-		memcmp(((char *) newheader) + offsetof(HeapTupleHeaderData, t_bits),
-			   ((char *) oldheader) + offsetof(HeapTupleHeaderData, t_bits),
-			   newtuple->t_len - offsetof(HeapTupleHeaderData, t_bits)) == 0)
+		memcmp(((char *) newheader) + SizeofHeapTupleHeader,
+			   ((char *) oldheader) + SizeofHeapTupleHeader,
+			   newtuple->t_len - SizeofHeapTupleHeader) == 0)
 	{
 		/* ... then suppress the update */
 		rettuple = NULL;
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index d2ad910..d3c088d 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -150,11 +150,13 @@ struct HeapTupleHeaderData
 
 	/* ^ - 23 bytes - ^ */
 
-	bits8		t_bits[1];		/* bitmap of NULLs -- VARIABLE LENGTH */
+	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */
 
 	/* MORE DATA FOLLOWS AT END OF STRUCT */
 };
 
+#define SizeofHeapTupleHeader offsetof(HeapTupleHeaderData, t_bits)
+
 /* typedef appears in tupbasics.h */
 
 /*
@@ -498,7 +500,7 @@ do { \
  * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page.
  */
 #define MaxHeapTupleSize  (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)))
-#define MinHeapTupleSize  MAXALIGN(offsetof(HeapTupleHeaderData, t_bits))
+#define MinHeapTupleSize  MAXALIGN(SizeofHeapTupleHeader)
 
 /*
  * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
@@ -513,7 +515,7 @@ do { \
  */
 #define MaxHeapTuplesPerPage	\
 	((int) ((BLCKSZ - SizeOfPageHeaderData) / \
-			(MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)) + sizeof(ItemIdData))))
+			(MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))
 
 /*
  * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
@@ -579,7 +581,7 @@ struct MinimalTupleData
 
 	/* ^ - 23 bytes - ^ */
 
-	bits8		t_bits[1];		/* bitmap of NULLs -- VARIABLE LENGTH */
+	bits8		t_bits[FLEXIBLE_ARRAY_MEMBER];	/* bitmap of NULLs */
 
 	/* MORE DATA FOLLOWS AT END OF STRUCT */
 };
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index 331dd25..7d18535 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -90,7 +90,7 @@
 
 #define TOAST_MAX_CHUNK_SIZE	\
 	(EXTERN_TUPLE_MAX_SIZE -							\
-	 MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)) -	\
+	 MAXALIGN(SizeofHeapTupleHeader) -					\
 	 sizeof(Oid) -										\
 	 sizeof(int32) -									\
 	 VARHDRSZ)
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 5a1d9a0..dcfe2b3 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -28,8 +28,11 @@ typedef struct ReorderBufferTupleBuf
 
 	/* tuple, stored sequentially */
 	HeapTupleData tuple;
-	HeapTupleHeaderData header;
-	char		data[MaxHeapTupleSize];
+	union
+	{
+		HeapTupleHeaderData header;
+		char		data[MaxHeapTupleSize];
+	}	t_data;
 } ReorderBufferTupleBuf;
 
 /*
-- 
2.3.0
