Hi, I want to propose a set of patches which reduce the WAL traffic generated by CREATE INDEX for GiST, GIN and SP-GiST. Similarly to b-tree and RUM, we can now log index pages of these access methods only once, at the end of the index build process. The implementation is based on generic_xlog.
Not only does it decrease the amount of WAL generated, but it also completely eliminates the WAL overhead in case of an error during index build. I also attached the SQL scripts which I used to measure xlog size. They show that pg_wal_lsn_diff for the patched version is 3 to 5 times smaller.
Not sure if regression tests are needed, since this is just an optimization. But I do not mind adding them if someone feels it is necessary. -- Anastasia Lubennikova Postgres Professional: http://www.postgrespro.com The Russian Postgres Company
commit 179285fb5175d715c20fc95eca3087b6a1899ed9 Author: Anastasia <a.lubennik...@postgrespro.ru> Date: Wed Feb 28 17:45:54 2018 +0300 add function generate_xlog_for_rel() diff --git a/src/backend/access/transam/generic_xlog.c b/src/backend/access/transam/generic_xlog.c index ce02354..dd2c041 100644 --- a/src/backend/access/transam/generic_xlog.c +++ b/src/backend/access/transam/generic_xlog.c @@ -545,3 +545,34 @@ generic_mask(char *page, BlockNumber blkno) mask_unused_space(page); } + +/* + * Function to write generic xlog for every existing block of a relation. + * Caller is responsible for locking the relation exclusively. + */ +void +generate_xlog_for_rel(Relation rel) +{ + BlockNumber blkno; + BlockNumber nblocks; + + nblocks = RelationGetNumberOfBlocks(rel); + + elog(DEBUG2, "generate_xlog_for_rel '%s', nblocks %u BEGIN.", + RelationGetRelationName(rel), nblocks); + + for (blkno = 0; blkno < nblocks; blkno++) + { + Buffer buffer; + GenericXLogState *state; + + CHECK_FOR_INTERRUPTS(); + + buffer = ReadBuffer(rel, blkno); + LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + + state = GenericXLogStart(rel); + GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE); + GenericXLogFinish(state); + + UnlockReleaseBuffer(buffer); + } + elog(DEBUG2, "generate_xlog_for_rel '%s' END.", RelationGetRelationName(rel)); +} diff --git a/src/include/access/generic_xlog.h b/src/include/access/generic_xlog.h index b23e1f6..33be157 100644 --- a/src/include/access/generic_xlog.h +++ b/src/include/access/generic_xlog.h @@ -42,4 +42,7 @@ extern const char *generic_identify(uint8 info); extern void generic_desc(StringInfo buf, XLogReaderState *record); extern void generic_mask(char *pagedata, BlockNumber blkno); +/* other utils */ +void generate_xlog_for_rel(Relation rel); + #endif /* GENERIC_XLOG_H */
commit e176bd8f650a4b112fa2e61960a27cb57329138c Author: Anastasia <a.lubennik...@postgrespro.ru> Date: Wed Feb 28 17:53:15 2018 +0300 optimal WAL for gin diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 37070b3..c615d3c 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -388,7 +388,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, /* It will fit, perform the insertion */ START_CRIT_SECTION(); - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { XLogBeginInsert(); XLogRegisterBuffer(0, stack->buffer, REGBUF_STANDARD); @@ -409,7 +409,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, MarkBufferDirty(childbuf); } - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { XLogRecPtr recptr; ginxlogInsert xlrec; @@ -566,7 +566,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } /* write WAL record */ - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { XLogRecPtr recptr; diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index f9daaba..349baa7 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -592,7 +592,7 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, * Great, all the items fit on a single page. If needed, prepare data * for a WAL record describing the changes we'll make. */ - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) computeLeafRecompressWALData(leaf); /* @@ -629,6 +629,7 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, * subsequent insertions will probably also go to the end. This packs * the index somewhat tighter when appending to a table, which is very * common. 
+ * */ if (!btree->isBuild) { @@ -718,7 +719,7 @@ dataExecPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, dataPlaceToPageLeafRecompress(buf, leaf); /* If needed, register WAL data built by computeLeafRecompressWALData */ - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { XLogRegisterBufData(0, leaf->walinfo, leaf->walinfolen); } @@ -1151,7 +1152,7 @@ dataExecPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack, pitem = (PostingItem *) insertdata; GinDataPageAddPostingItem(page, pitem, off); - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { /* * This must be static, because it has to survive until XLogInsert, @@ -1768,6 +1769,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, Pointer ptr; int nrootitems; int rootsize; + bool is_build = (buildStats != NULL); /* Construct the new root page in memory first. */ tmppage = (Page) palloc(BLCKSZ); @@ -1815,7 +1817,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, PageRestoreTempPage(tmppage, page); MarkBufferDirty(buffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !is_build) { XLogRecPtr recptr; ginxlogCreatePostingTree data; diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 8107697..b0fdb23 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -571,7 +571,7 @@ entryExecPlaceToPage(GinBtree btree, Buffer buf, GinBtreeStack *stack, elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(btree->index)); - if (RelationNeedsWAL(btree->index)) + if (RelationNeedsWAL(btree->index) && !btree->isBuild) { /* * This must be static, because it has to survive until XLogInsert, diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 23f7285..1855894 100644 --- 
a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -17,6 +17,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" #include "access/xloginsert.h" +#include "access/generic_xlog.h" #include "catalog/index.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -192,6 +193,7 @@ ginEntryInsert(GinState *ginstate, buildStats->nEntries++; ginPrepareEntryScan(&btree, attnum, key, category, ginstate); + btree.isBuild = (buildStats != NULL); stack = ginFindLeafPage(&btree, false, NULL); page = BufferGetPage(stack->buffer); @@ -342,23 +344,6 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) GinInitBuffer(RootBuffer, GIN_LEAF); MarkBufferDirty(RootBuffer); - if (RelationNeedsWAL(index)) - { - XLogRecPtr recptr; - Page page; - - XLogBeginInsert(); - XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); - XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT); - - recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX); - - page = BufferGetPage(RootBuffer); - PageSetLSN(page, recptr); - - page = BufferGetPage(MetaBuffer); - PageSetLSN(page, recptr); - } UnlockReleaseBuffer(MetaBuffer); UnlockReleaseBuffer(RootBuffer); @@ -413,7 +398,10 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) * Update metapage stats */ buildstate.buildStats.nTotalPages = RelationGetNumberOfBlocks(index); - ginUpdateStats(index, &buildstate.buildStats); + ginUpdateStats(index, &buildstate.buildStats, true); + + /* + * Create generic wal records for all pages of relation, if necessary. + * It seems reasonable not to generate WAL, if we recieved interrupt + * signal. 
+ */ + CHECK_FOR_INTERRUPTS(); + if (RelationNeedsWAL(index)) + generate_xlog_for_rel(index); /* * Return statistics diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index 7bac7a1..a159a47 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -664,7 +664,7 @@ ginGetStats(Relation index, GinStatsData *stats) * Note: nPendingPages and ginVersion are *not* copied over */ void -ginUpdateStats(Relation index, const GinStatsData *stats) +ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build) { Buffer metabuffer; Page metapage; @@ -694,7 +694,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats) MarkBufferDirty(metabuffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !is_build) { XLogRecPtr recptr; ginxlogUpdateMeta data; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 398532d..4cd46c4 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -758,7 +758,7 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) /* Update the metapage with accurate page and entry counts */ idxStat.nTotalPages = npages; - ginUpdateStats(info->index, &idxStat); + ginUpdateStats(info->index, &idxStat, false); /* Finally, vacuum the FSM */ IndexFreeSpaceMapVacuum(info->index); diff --git a/src/include/access/gin.h b/src/include/access/gin.h index 0acdb88..7a9e9e0 100644 --- a/src/include/access/gin.h +++ b/src/include/access/gin.h @@ -71,6 +71,7 @@ extern int gin_pending_list_limit; /* ginutil.c */ extern void ginGetStats(Relation index, GinStatsData *stats); -extern void ginUpdateStats(Relation index, const GinStatsData *stats); +extern void ginUpdateStats(Relation index, + const GinStatsData *stats, bool is_build); #endif /* GIN_H */
commit 5b42c832b92b08d6eff394c5608bf62791b704d6 Author: Anastasia <a.lubennik...@postgrespro.ru> Date: Wed Feb 28 17:53:39 2018 +0300 optimial WAL for GIST diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 51c32e4..b32a323 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -169,7 +169,7 @@ gistinsert(Relation r, Datum *values, bool *isnull, values, isnull, true /* size is currently bogus */ ); itup->t_tid = *ht_ctid; - gistdoinsert(r, itup, 0, giststate); + gistdoinsert(r, itup, 0, giststate, false); /* cleanup */ MemoryContextSwitchTo(oldCxt); @@ -215,7 +215,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, BlockNumber *newblkno, Buffer leftchildbuf, List **splitinfo, - bool markfollowright) + bool markfollowright, + bool is_build) { BlockNumber blkno = BufferGetBlockNumber(buffer); Page page = BufferGetPage(buffer); @@ -451,7 +452,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, * insertion for that. NB: The number of pages and data segments * specified here must match the calculations in gistXLogSplit()! */ - if (RelationNeedsWAL(rel)) + if (RelationNeedsWAL(rel) && !is_build) XLogEnsureRecordSpace(npage, 1 + npage * 2); START_CRIT_SECTION(); @@ -472,18 +473,20 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer)); dist->page = BufferGetPage(dist->buffer); - /* Write the WAL record */ - if (RelationNeedsWAL(rel)) + /* + * Write the WAL record. + * Do not write XLog entry if the insertion is caused by + * index build process. 
+ */ + if (RelationNeedsWAL(rel) && !is_build) recptr = gistXLogSplit(is_leaf, - dist, oldrlink, oldnsn, leftchildbuf, - markfollowright); + dist, oldrlink, oldnsn, leftchildbuf, + markfollowright); else recptr = gistGetFakeLSN(rel); for (ptr = dist; ptr; ptr = ptr->next) - { PageSetLSN(ptr->page, recptr); - } /* * Return the new child buffers to the caller. @@ -537,7 +540,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, if (BufferIsValid(leftchildbuf)) MarkBufferDirty(leftchildbuf); - if (RelationNeedsWAL(rel)) + + if (RelationNeedsWAL(rel) && !is_build) { OffsetNumber ndeloffs = 0, deloffs[1]; @@ -560,6 +564,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, PageSetLSN(page, recptr); } + if (newblkno) *newblkno = blkno; } @@ -576,17 +581,28 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, * the full page image. There's a chicken-and-egg problem: if we updated * the child pages first, we wouldn't know the recptr of the WAL record * we're about to write. + * + * We use fakeLSNs for inserions caused by index build. And when it is + * finished, we write generic_xlog entry for each index page and update + * all LSNs. In order to keep NSNs less then LSNs after this update, we + * set NSN to InvalidXLogRecPtr, which is the smallest possible NSN. */ + if (BufferIsValid(leftchildbuf)) { Page leftpg = BufferGetPage(leftchildbuf); + XLogRecPtr fakerecptr = InvalidXLogRecPtr; - GistPageSetNSN(leftpg, recptr); - GistClearFollowRight(leftpg); + if (!is_build) + GistPageSetNSN(leftpg, recptr); + else + GistPageSetNSN(leftpg, fakerecptr); + GistClearFollowRight(leftpg); PageSetLSN(leftpg, recptr); } + END_CRIT_SECTION(); return is_split; @@ -598,7 +614,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, * so it does not bother releasing palloc'd allocations. 
*/ void -gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) +gistdoinsert(Relation r, IndexTuple itup, Size freespace, + GISTSTATE *giststate, bool is_build) { ItemId iid; IndexTuple idxtuple; @@ -610,6 +627,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) memset(&state, 0, sizeof(GISTInsertState)); state.freespace = freespace; state.r = r; + state.is_build = is_build; /* Start from the root */ firststack.blkno = GIST_ROOT_BLKNO; @@ -1220,7 +1238,7 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, oldoffnum, NULL, leftchild, &splitinfo, - true); + true, state->is_build); /* * Before recursing up in case the page was split, release locks on the diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 434f15f..bfc10cd 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -20,6 +20,7 @@ #include "access/gist_private.h" #include "access/gistxlog.h" #include "access/xloginsert.h" +#include "access/generic_xlog.h" #include "catalog/index.h" #include "miscadmin.h" #include "optimizer/cost.h" @@ -178,18 +179,12 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) MarkBufferDirty(buffer); - if (RelationNeedsWAL(index)) - { - XLogRecPtr recptr; - - XLogBeginInsert(); - XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT); - - recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX); - PageSetLSN(page, recptr); - } - else - PageSetLSN(page, gistGetFakeLSN(heap)); + /* + * Do not write index pages to WAL unitl index build is finished. + * But we still need increasing LSNs on each page, so use FakeLSN, + * even for relations which eventually need WAL. 
+ */ + PageSetLSN(page, gistGetFakeLSN(heap)); UnlockReleaseBuffer(buffer); @@ -223,6 +218,15 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) freeGISTstate(buildstate.giststate); /* + * Create generic wal records for all pages of relation, if necessary. + * It seems reasonable not to generate WAL, if we recieved interrupt + * signal. + */ + CHECK_FOR_INTERRUPTS(); + if (RelationNeedsWAL(index)) + generate_xlog_for_rel(index); + + /* * Return statistics */ result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); @@ -484,7 +488,7 @@ gistBuildCallback(Relation index, * locked, we call gistdoinsert directly. */ gistdoinsert(index, itup, buildstate->freespace, - buildstate->giststate); + buildstate->giststate, true); } /* Update tuple count and total size. */ @@ -690,7 +694,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level, itup, ntup, oldoffnum, &placed_to_blk, InvalidBuffer, &splitinfo, - false); + false, true); /* * If this is a root split, update the root path item kept in memory. This diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 55cccd2..cb86299 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -975,6 +975,7 @@ gistproperty(Oid index_oid, int attno, * Temporary and unlogged GiST indexes are not WAL-logged, but we need LSNs * to detect concurrent page splits anyway. This function provides a fake * sequence of LSNs for that purpose. + * Persistent relations are also not WAL-logged while we build index. */ XLogRecPtr gistGetFakeLSN(Relation rel) @@ -995,7 +996,6 @@ gistGetFakeLSN(Relation rel) * Unlogged relations are accessible from other backends, and survive * (clean) restarts. GetFakeLSNForUnloggedRel() handles that for us. 
*/ - Assert(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED); return GetFakeLSNForUnloggedRel(); } } diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 36ed724..0588fc7 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -241,6 +241,7 @@ typedef struct { Relation r; Size freespace; /* free space to be left */ + bool is_build; GISTInsertStack *stack; } GISTInsertState; @@ -387,9 +388,9 @@ extern MemoryContext createTempGistContext(void); extern GISTSTATE *initGISTstate(Relation index); extern void freeGISTstate(GISTSTATE *giststate); extern void gistdoinsert(Relation r, - IndexTuple itup, - Size freespace, - GISTSTATE *GISTstate); + IndexTuple itup, + Size freespace, + GISTSTATE* giststate, bool is_build); /* A List of these is returned from gistplacetopage() in *splitinfo */ typedef struct @@ -404,7 +405,8 @@ extern bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, OffsetNumber oldoffnum, BlockNumber *newblkno, Buffer leftchildbuf, List **splitinfo, - bool markleftchild); + bool markleftchild, + bool is_build); extern SplitedPageLayout *gistSplit(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate);
commit fe4c80efb0f40a6b113ac8274a7acb3c576918b2 Author: Anastasia <a.lubennik...@postgrespro.ru> Date: Wed Feb 28 17:54:00 2018 +0300 optimal WAL for spgist diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 7bf26f8..2e08b00 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -289,7 +289,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, MarkBufferDirty(current->buffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; int flags; @@ -516,7 +516,7 @@ moveLeafs(Relation index, SpGistState *state, MarkBufferDirty(current->buffer); MarkBufferDirty(nbuf); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; @@ -1334,7 +1334,7 @@ doPickSplit(Relation index, SpGistState *state, saveCurrent.buffer = InvalidBuffer; } - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; int flags; @@ -1531,7 +1531,7 @@ spgAddNodeAction(Relation index, SpGistState *state, MarkBufferDirty(current->buffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; @@ -1644,7 +1644,7 @@ spgAddNodeAction(Relation index, SpGistState *state, MarkBufferDirty(saveCurrent.buffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; int flags; @@ -1840,7 +1840,7 @@ spgSplitNodeAction(Relation index, SpGistState *state, MarkBufferDirty(current->buffer); - if (RelationNeedsWAL(index)) + if (RelationNeedsWAL(index) && !state->isBuild) { XLogRecPtr recptr; diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 34d9b48..0623bd8 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -21,6 +21,7 @@ #include "access/spgxlog.h" #include "access/xlog.h" 
#include "access/xloginsert.h" +#include "access/generic_xlog.h" #include "catalog/index.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -100,26 +101,6 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo) SpGistInitBuffer(nullbuffer, SPGIST_LEAF | SPGIST_NULLS); MarkBufferDirty(nullbuffer); - if (RelationNeedsWAL(index)) - { - XLogRecPtr recptr; - - XLogBeginInsert(); - - /* - * Replay will re-initialize the pages, so don't take full pages - * images. No other data to log. - */ - XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); - XLogRegisterBuffer(1, rootbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); - XLogRegisterBuffer(2, nullbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); - - recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX); - - PageSetLSN(BufferGetPage(metabuffer), recptr); - PageSetLSN(BufferGetPage(rootbuffer), recptr); - PageSetLSN(BufferGetPage(nullbuffer), recptr); - } END_CRIT_SECTION(); @@ -145,6 +126,9 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo) SpGistUpdateMetaPage(index); + if (RelationNeedsWAL(index)) + generate_xlog_for_rel(index); + result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult)); result->heap_tuples = result->index_tuples = reltuples;
gin-WAL-test.sql
Description: application/sql
gist-WAL-test.sql
Description: application/sql
spgist-WAL-test.sql
Description: application/sql