This article collects typical usage examples of the C++ function PageIndexTupleDelete. If you have been wondering exactly what PageIndexTupleDelete does and how to use it, the curated code samples below should help.
The following presents 15 code examples of the PageIndexTupleDelete function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: gistdelete
/*
* Retail deletion of a single tuple.
*
* NB: this is no longer called externally, but is still needed by
* gistlayerinsert(). That dependency will have to be fixed if GIST
* is ever going to allow concurrent insertions.
*/
static void
gistdelete(Relation r, ItemPointer tid)
{
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
Page page;
/*
* Since GIST is not marked "amconcurrent" in pg_am, caller should
* have acquired exclusive lock on index relation. We need no locking
* here.
*/
blkno = ItemPointerGetBlockNumber(tid);
offnum = ItemPointerGetOffsetNumber(tid);
/* adjust any scans that will be affected by this deletion */
/* NB: this works only for scans in *this* backend! */
gistadjscans(r, GISTOP_DEL, blkno, offnum);
/* delete the index tuple */
buf = ReadBuffer(r, blkno);
page = BufferGetPage(buf);
PageIndexTupleDelete(page, offnum);
WriteBuffer(buf);
}
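Example 1 shows the minimal shape of a retail deletion: read the buffer, get the page, call PageIndexTupleDelete on the target offset, and write the buffer back. The sketch below restates that pattern against the current buffer-manager API; the helper name delete_one_index_tuple is made up for illustration, and real access-method code would also need WAL logging and its own concurrency rules, which are omitted here.
#include "postgres.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
/*
 * Hypothetical helper (not part of PostgreSQL): delete the index tuple at
 * the given offset on the given block.  Buffer locking is shown for
 * completeness; WAL logging is intentionally left out of this sketch.
 */
static void
delete_one_index_tuple(Relation rel, BlockNumber blkno, OffsetNumber offnum)
{
	Buffer		buf;
	Page		page;
	buf = ReadBuffer(rel, blkno);
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	page = BufferGetPage(buf);
	START_CRIT_SECTION();
	/* remove the item; later ItemIds are compacted to close the gap */
	PageIndexTupleDelete(page, offnum);
	MarkBufferDirty(buf);
	END_CRIT_SECTION();
	UnlockReleaseBuffer(buf);
}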
Example 2: entryPreparePage
/*
* Delete the tuple on a leaf page if it already existed and we need to
* update it; also update the old child blkno to point to the new right
* page if a child split occurred
*/
static BlockNumber
entryPreparePage(GinBtree btree, Page page, OffsetNumber off)
{
BlockNumber ret = InvalidBlockNumber;
Assert(btree->entry);
Assert(!GinPageIsData(page));
if (btree->isDelete)
{
Assert(GinPageIsLeaf(page));
PageIndexTupleDelete(page, off);
}
if (!GinPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber)
{
IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
ItemPointerSet(&itup->t_tid, btree->rightblkno, InvalidOffsetNumber);
ret = btree->rightblkno;
}
btree->rightblkno = InvalidBlockNumber;
return ret;
}
Example 3: entryPreparePage
/*
* Delete the tuple on a leaf page if it existed and we need to update it;
* update the old child blkno to the new right page if a child split
* occurred
*/
static BlockNumber
entryPreparePage(RumBtree btree, Page page, OffsetNumber off)
{
BlockNumber ret = InvalidBlockNumber;
Assert(btree->entry);
Assert(!RumPageIsData(page));
if (btree->isDelete)
{
Assert(RumPageIsLeaf(page));
PageIndexTupleDelete(page, off);
}
if (!RumPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber)
{
IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
RumSetDownlink(itup, btree->rightblkno);
ret = btree->rightblkno;
}
btree->rightblkno = InvalidBlockNumber;
return ret;
}
Example 4: entryPreparePage
/*
* Delete the tuple on a leaf page if it existed and we need to update it;
* update the old child blkno to the new right page if a child split
* occurred
*/
static void
entryPreparePage(GinBtree btree, Page page, OffsetNumber off,
GinBtreeEntryInsertData *insertData, BlockNumber updateblkno)
{
Assert(insertData->entry);
Assert(!GinPageIsData(page));
if (insertData->isDelete)
{
Assert(GinPageIsLeaf(page));
PageIndexTupleDelete(page, off);
}
if (!GinPageIsLeaf(page) && updateblkno != InvalidBlockNumber)
{
IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
GinSetDownlink(itup, updateblkno);
}
}
Example 5: addOrReplaceTuple
/*
* Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
static void
addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset)
{
if (offset <= PageGetMaxOffsetNumber(page))
{
SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page,
PageGetItemId(page, offset));
if (dt->tupstate != SPGIST_PLACEHOLDER)
elog(ERROR, "SPGiST tuple to be replaced is not a placeholder");
Assert(SpGistPageGetOpaque(page)->nPlaceholder > 0);
SpGistPageGetOpaque(page)->nPlaceholder--;
PageIndexTupleDelete(page, offset);
}
Assert(offset <= PageGetMaxOffsetNumber(page) + 1);
if (PageAddItem(page, tuple, size, offset, false, false) != offset)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
size);
}
Example 6: ginRedoInsertEntry
static void
ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rdata)
{
Page page = BufferGetPage(buffer);
ginxlogInsertEntry *data = (ginxlogInsertEntry *) rdata;
OffsetNumber offset = data->offset;
IndexTuple itup;
if (rightblkno != InvalidBlockNumber)
{
/* update link to right page after split */
Assert(!GinPageIsLeaf(page));
Assert(offset >= FirstOffsetNumber && offset <= PageGetMaxOffsetNumber(page));
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offset));
GinSetDownlink(itup, rightblkno);
}
if (data->isDelete)
{
Assert(GinPageIsLeaf(page));
Assert(offset >= FirstOffsetNumber && offset <= PageGetMaxOffsetNumber(page));
PageIndexTupleDelete(page, offset);
}
itup = &data->tuple;
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
elog(ERROR, "failed to add item to index page in %u/%u/%u",
node.spcNode, node.dbNode, node.relNode);
}
}
Example 7: _hash_splitbucket
//......... part of the code omitted here .........
if (ooffnum > omaxoffnum)
{
/* at end of page, but check for an(other) overflow page */
oblkno = oopaque->hasho_nextblkno;
if (!BlockNumberIsValid(oblkno))
break;
/*
* we ran out of tuples on this particular page, but we
* have more overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
opage = BufferGetPage(obuf);
_hash_checkpage(rel, opage, LH_OVERFLOW_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
continue;
}
/*
* Re-hash the tuple to determine which bucket it now belongs in.
*
* It is annoying to call the hash function while holding locks,
* but releasing and relocking the page for each tuple is unappealing
* too.
*/
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
itup = &(hitem->hash_itup);
datum = index_getattr(itup, 1, itupdesc, &null);
Assert(!null);
bucket = _hash_hashkey2bucket(_hash_datum2hashkey(rel, datum),
maxbucket, highmask, lowmask);
if (bucket == nbucket)
{
/*
* insert the tuple into the new bucket. if it doesn't fit on
* the current page in the new bucket, we must allocate a new
* overflow page and place the tuple on that page instead.
*/
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
itemsz = MAXALIGN(itemsz);
if (PageGetFreeSpace(npage) < itemsz)
{
/* write out nbuf and drop lock, but keep pin */
_hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
/* chain to a new overflow page */
nbuf = _hash_addovflpage(rel, metabuf, nbuf);
npage = BufferGetPage(nbuf);
_hash_checkpage(rel, npage, LH_OVERFLOW_PAGE);
/* we don't need nopaque within the loop */
}
noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
== InvalidOffsetNumber)
elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(rel));
/*
* now delete the tuple from the old bucket. after this
* section of code, 'ooffnum' will actually point to the
* ItemId to which we would point if we had advanced it before
* the deletion (PageIndexTupleDelete repacks the ItemId
* array). this also means that 'omaxoffnum' is exactly one
* less than it used to be, so we really can just decrement it
* instead of calling PageGetMaxOffsetNumber.
*/
PageIndexTupleDelete(opage, ooffnum);
omaxoffnum = OffsetNumberPrev(omaxoffnum);
}
else
{
/*
* the tuple stays on this page. we didn't move anything, so
* we didn't delete anything and therefore we don't have to
* change 'omaxoffnum'.
*/
Assert(bucket == obucket);
ooffnum = OffsetNumberNext(ooffnum);
}
}
/*
* We're at the end of the old bucket chain, so we're done partitioning
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
_hash_squeezebucket(rel, obucket, start_oblkno);
}
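The comment in Example 7 points at a detail every caller has to respect: PageIndexTupleDelete repacks the ItemId array, so after a deletion the same offset number now refers to what used to be the next tuple, and the page's max offset drops by one. Below is a stripped-down sketch of that delete-while-scanning pattern; the helper and its keep callback are hypothetical, and locking and WAL logging are omitted.
#include "postgres.h"
#include "access/itup.h"
#include "storage/bufpage.h"
/*
 * Hypothetical example: walk a page and delete every index tuple the caller
 * does not want to keep.  Because PageIndexTupleDelete compacts the line
 * pointer array, we advance the offset only when a tuple is kept and shrink
 * maxoff when one is deleted.
 */
static void
prune_page_tuples(Page page, bool (*keep) (IndexTuple itup))
{
	OffsetNumber off = FirstOffsetNumber;
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	while (off <= maxoff)
	{
		ItemId		iid = PageGetItemId(page, off);
		IndexTuple	itup = (IndexTuple) PageGetItem(page, iid);
		if (keep(itup))
			off = OffsetNumberNext(off);		/* tuple stays; move on */
		else
		{
			PageIndexTupleDelete(page, off);	/* next tuple slides into 'off' */
			maxoff = OffsetNumberPrev(maxoff);
		}
	}
}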
Example 8: rtbulkdelete
/*
* Bulk deletion of all index entries pointing to a set of heap tuples.
* The set of target tuples is specified via a callback routine that tells
* whether any given heap tuple (identified by ItemPointer) is being deleted.
*
* Result: a palloc'd struct containing statistical info for VACUUM displays.
*/
Datum
rtbulkdelete(PG_FUNCTION_ARGS)
{
Relation rel = (Relation) PG_GETARG_POINTER(0);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
void *callback_state = (void *) PG_GETARG_POINTER(2);
IndexBulkDeleteResult *result;
BlockNumber num_pages;
double tuples_removed;
double num_index_tuples;
IndexScanDesc iscan;
tuples_removed = 0;
num_index_tuples = 0;
/*
* Since rtree is not marked "amconcurrent" in pg_am, caller should have
* acquired exclusive lock on index relation. We need no locking here.
*/
/*
* XXX generic implementation --- should be improved!
*/
/* walk through the entire index */
iscan = index_beginscan(NULL, rel, SnapshotAny, 0, NULL);
/* including killed tuples */
iscan->ignore_killed_tuples = false;
while (index_getnext_indexitem(iscan, ForwardScanDirection))
{
vacuum_delay_point();
if (callback(&iscan->xs_ctup.t_self, callback_state))
{
ItemPointerData indextup = iscan->currentItemData;
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
Page page;
blkno = ItemPointerGetBlockNumber(&indextup);
offnum = ItemPointerGetOffsetNumber(&indextup);
/* adjust any scans that will be affected by this deletion */
/* (namely, my own scan) */
rtadjscans(rel, RTOP_DEL, blkno, offnum);
/* delete the index tuple */
buf = ReadBuffer(rel, blkno);
page = BufferGetPage(buf);
PageIndexTupleDelete(page, offnum);
WriteBuffer(buf);
tuples_removed += 1;
}
else
num_index_tuples += 1;
}
index_endscan(iscan);
/* return statistics */
num_pages = RelationGetNumberOfBlocks(rel);
result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
result->num_pages = num_pages;
result->num_index_tuples = num_index_tuples;
result->tuples_removed = tuples_removed;
PG_RETURN_POINTER(result);
}
Example 9: gistplacetopage
//......... part of the code omitted here .........
PageSetLSN(ptr->page, XLogRecPtrForTemp);
}
}
/* set up NSN */
oldnsn = GistPageGetOpaque(dist->page)->nsn;
if (state->stack->blkno == GIST_ROOT_BLKNO)
/* if this is a root split, we should use the initial value */
oldnsn = PageGetLSN(dist->page);
for (ptr = dist; ptr; ptr = ptr->next)
{
/* set oldnsn only on the last page */
GistPageGetOpaque(ptr->page)->nsn = (ptr->next) ?
PageGetLSN(ptr->page) : oldnsn;
}
/*
* Release buffers. If it was a root split, release all buffers because
* we created all of them.
*/
ptr = (state->stack->blkno == GIST_ROOT_BLKNO) ? dist : dist->next;
for (; ptr; ptr = ptr->next)
UnlockReleaseBuffer(ptr->buffer);
if (state->stack->blkno == GIST_ROOT_BLKNO)
{
gistnewroot(state->r, state->stack->buffer, state->itup, state->ituplen, &(state->key));
state->needInsertComplete = false;
}
END_CRIT_SECTION();
}
else
{
/* enough space */
START_CRIT_SECTION();
if (!is_leaf)
PageIndexTupleDelete(state->stack->page, state->stack->childoffnum);
gistfillbuffer(state->r, state->stack->page, state->itup, state->ituplen, InvalidOffsetNumber);
MarkBufferDirty(state->stack->buffer);
if (!state->r->rd_istemp)
{
OffsetNumber noffs = 0,
offs[1];
XLogRecPtr recptr;
XLogRecData *rdata;
if (!is_leaf)
{
/* only on an inner page do we need to delete the previous version */
offs[0] = state->stack->childoffnum;
noffs = 1;
}
rdata = formUpdateRdata(state->r, state->stack->buffer,
offs, noffs,
state->itup, state->ituplen,
&(state->key));
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE, rdata);
PageSetLSN(state->stack->page, recptr);
PageSetTLI(state->stack->page, ThisTimeLineID);
}
else
PageSetLSN(state->stack->page, XLogRecPtrForTemp);
if (state->stack->blkno == GIST_ROOT_BLKNO)
state->needInsertComplete = false;
END_CRIT_SECTION();
if (state->ituplen > 1)
{ /* previous is_splitted==true */
/*
* the child was split, so we must form a union tuple for insertion into
* the parent
*/
IndexTuple newtup = gistunion(state->r, state->itup, state->ituplen, giststate);
ItemPointerSetBlockNumber(&(newtup->t_tid), state->stack->blkno);
state->itup[0] = newtup;
state->ituplen = 1;
}
else if (is_leaf)
{
/*
* itup[0] stores the key used to adjust the parent; we mark it valid so
* the GistTupleIsInvalid check in gistgetadjusted() behaves correctly
*/
ItemPointerSetBlockNumber(&(state->itup[0]->t_tid), state->stack->blkno);
GistTupleSetValid(state->itup[0]);
}
}
return is_splitted;
}
Example 10: gistRedoPageUpdateRecord
/*
* redo any page update (except page split)
*/
static void
gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
{
char *begin = XLogRecGetData(record);
gistxlogPageUpdate *xldata = (gistxlogPageUpdate *) begin;
Buffer buffer;
Page page;
char *data;
if (BlockNumberIsValid(xldata->leftchild))
gistRedoClearFollowRight(xldata->node, lsn, xldata->leftchild);
/* nothing more to do if page was backed up (and no info to do it with) */
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
{
UnlockReleaseBuffer(buffer);
return;
}
data = begin + sizeof(gistxlogPageUpdate);
/* Delete old tuples */
if (xldata->ntodelete > 0)
{
int i;
OffsetNumber *todelete = (OffsetNumber *) data;
data += sizeof(OffsetNumber) * xldata->ntodelete;
for (i = 0; i < xldata->ntodelete; i++)
PageIndexTupleDelete(page, todelete[i]);
if (GistPageIsLeaf(page))
GistMarkTuplesDeleted(page);
}
/* add tuples */
if (data - begin < record->xl_len)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < record->xl_len)
{
IndexTuple itup = (IndexTuple) data;
Size sz = IndexTupleSize(itup);
OffsetNumber l;
data += sz;
l = PageAddItem(page, (Item) itup, sz, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to GiST index page, size %d bytes",
(int) sz);
off++;
}
}
else
{
/*
* special case: leafpage, nothing to insert, nothing to delete, then
* vacuum marks page
*/
if (GistPageIsLeaf(page) && xldata->ntodelete == 0)
GistClearTuplesDeleted(page);
}
if (!GistPageIsLeaf(page) && PageGetMaxOffsetNumber(page) == InvalidOffsetNumber && xldata->blkno == GIST_ROOT_BLKNO)
/*
* all links on the non-leaf root page were deleted by vacuum full, so the
* root page becomes a leaf
*/
GistPageSetLeaf(page);
GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
MarkBufferDirty(buffer);
UnlockReleaseBuffer(buffer);
}
Example 11: spgRedoSplitTuple
static void
spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
{
char *ptr = XLogRecGetData(record);
spgxlogSplitTuple *xldata = (spgxlogSplitTuple *) ptr;
char *prefixTuple;
SpGistInnerTupleData prefixTupleHdr;
char *postfixTuple;
SpGistInnerTupleData postfixTupleHdr;
Buffer buffer;
Page page;
ptr += sizeof(spgxlogSplitTuple);
prefixTuple = ptr;
/* the prefix tuple is unaligned, so make a copy to access its header */
memcpy(&prefixTupleHdr, prefixTuple, sizeof(SpGistInnerTupleData));
ptr += prefixTupleHdr.size;
postfixTuple = ptr;
/* postfix tuple is also unaligned */
memcpy(&postfixTupleHdr, postfixTuple, sizeof(SpGistInnerTupleData));
/*
* In normal operation we would have both pages locked simultaneously; but
* in WAL replay it should be safe to update them one at a time, as long
* as we do it in the right order.
*/
/* insert postfix tuple first to avoid dangling link */
if (xldata->blknoPostfix != xldata->blknoPrefix)
{
XLogRedoAction action;
if (xldata->newPage)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix, true);
/* SplitTuple is not used for nulls pages */
SpGistInitBuffer(buffer, 0);
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, 1,
xldata->node, xldata->blknoPostfix,
&buffer);
if (action == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTupleHdr.size, xldata->offnumPostfix);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
/* now handle the original page */
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blknoPrefix,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTupleHdr.size,
xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
prefixTupleHdr.size);
if (xldata->blknoPostfix == xldata->blknoPrefix)
addOrReplaceTuple(page, (Item) postfixTuple, postfixTupleHdr.size,
xldata->offnumPostfix);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
Example 12: gistplacetopage
//......... part of the code omitted here .........
*/
PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
dist->page = BufferGetPage(dist->buffer);
/* Write the WAL record */
if (RelationNeedsWAL(rel))
recptr = gistXLogSplit(rel->rd_node, blkno, is_leaf,
dist, oldrlink, oldnsn, leftchildbuf,
markfollowright);
else
recptr = gistGetFakeLSN(rel);
for (ptr = dist; ptr; ptr = ptr->next)
{
PageSetLSN(ptr->page, recptr);
}
/*
* Return the new child buffers to the caller.
*
* If this was a root split, we've already inserted the downlink
* pointers, in the form of a new root page. Therefore we can release
* all the new buffers, and keep just the root page locked.
*/
if (is_rootsplit)
{
for (ptr = dist->next; ptr; ptr = ptr->next)
UnlockReleaseBuffer(ptr->buffer);
}
}
else
{
/*
* Enough space. We also get here if ntuples==0.
*/
START_CRIT_SECTION();
if (OffsetNumberIsValid(oldoffnum))
PageIndexTupleDelete(page, oldoffnum);
gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
MarkBufferDirty(buffer);
if (BufferIsValid(leftchildbuf))
MarkBufferDirty(leftchildbuf);
if (RelationNeedsWAL(rel))
{
OffsetNumber ndeloffs = 0,
deloffs[1];
if (OffsetNumberIsValid(oldoffnum))
{
deloffs[0] = oldoffnum;
ndeloffs = 1;
}
recptr = gistXLogUpdate(rel->rd_node, buffer,
deloffs, ndeloffs, itup, ntup,
leftchildbuf);
PageSetLSN(page, recptr);
}
else
{
recptr = gistGetFakeLSN(rel);
PageSetLSN(page, recptr);
}
if (newblkno)
*newblkno = blkno;
}
/*
* If we inserted the downlink for a child page, set NSN and clear
* F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
* follow the rightlink if and only if they looked at the parent page
* before we inserted the downlink.
*
* Note that we do this *after* writing the WAL record. That means that
* the possible full page image in the WAL record does not include these
* changes, and they must be replayed even if the page is restored from
* the full page image. There's a chicken-and-egg problem: if we updated
* the child pages first, we wouldn't know the recptr of the WAL record
* we're about to write.
*/
if (BufferIsValid(leftchildbuf))
{
Page leftpg = BufferGetPage(leftchildbuf);
GistPageSetNSN(leftpg, recptr);
GistClearFollowRight(leftpg);
PageSetLSN(leftpg, recptr);
}
END_CRIT_SECTION();
return is_split;
}
Example 13: gistbulkdelete
/*
* Bulk deletion of all index entries pointing to a set of heap tuples and
* check invalid tuples after crash recovery.
* The set of target tuples is specified via a callback routine that tells
* whether any given heap tuple (identified by ItemPointer) is being deleted.
*
* Result: a palloc'd struct containing statistical info for VACUUM displays.
*/
Datum
gistbulkdelete(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation rel = info->index;
GistBDItem *stack,
*ptr;
/* first time through? */
if (stats == NULL)
stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
/* we'll re-count the tuples each time */
stats->std.num_index_tuples = 0;
stack = (GistBDItem *) palloc0(sizeof(GistBDItem));
stack->blkno = GIST_ROOT_BLKNO;
while (stack)
{
Buffer buffer = ReadBufferWithStrategy(rel, stack->blkno, info->strategy);
Page page;
OffsetNumber i,
maxoff;
IndexTuple idxtuple;
ItemId iid;
LockBuffer(buffer, GIST_SHARE);
gistcheckpage(rel, buffer);
page = (Page) BufferGetPage(buffer);
if (GistPageIsLeaf(page))
{
OffsetNumber todelete[MaxOffsetNumber];
int ntodelete = 0;
LockBuffer(buffer, GIST_UNLOCK);
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
if (stack->blkno == GIST_ROOT_BLKNO && !GistPageIsLeaf(page))
{
/* only the root can become non-leaf during relock */
UnlockReleaseBuffer(buffer);
/* one more check */
continue;
}
/*
* check for a split that occurred after we looked at the parent; we must
* re-check it after the relock
*/
pushStackIfSplited(page, stack);
/*
* Remove deletable tuples from page
*/
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
iid = PageGetItemId(page, i);
idxtuple = (IndexTuple) PageGetItem(page, iid);
if (callback(&(idxtuple->t_tid), callback_state))
{
todelete[ntodelete] = i - ntodelete;
ntodelete++;
stats->std.tuples_removed += 1;
}
else
stats->std.num_index_tuples += 1;
}
if (ntodelete)
{
START_CRIT_SECTION();
MarkBufferDirty(buffer);
for (i = 0; i < ntodelete; i++)
PageIndexTupleDelete(page, todelete[i]);
GistMarkTuplesDeleted(page);
if (!rel->rd_istemp)
{
XLogRecData *rdata;
XLogRecPtr recptr;
gistxlogPageUpdate *xlinfo;
//......... part of the code omitted here .........
Example 14: spgRedoSplitTuple
static void
spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
{
char *ptr = XLogRecGetData(record);
spgxlogSplitTuple *xldata = (spgxlogSplitTuple *) ptr;
SpGistInnerTuple prefixTuple;
SpGistInnerTuple postfixTuple;
Buffer buffer;
Page page;
/* we assume this is adequately aligned */
ptr += sizeof(spgxlogSplitTuple);
prefixTuple = (SpGistInnerTuple) ptr;
ptr += prefixTuple->size;
postfixTuple = (SpGistInnerTuple) ptr;
/* insert postfix tuple first to avoid dangling link */
if (xldata->blknoPostfix != xldata->blknoPrefix &&
!(record->xl_info & XLR_BKP_BLOCK_2))
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix,
xldata->newPage);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTuple->size, xldata->offnumPostfix);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
}
/* now handle the original page */
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPrefix, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
prefixTuple->size);
if (xldata->blknoPostfix == xldata->blknoPrefix)
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTuple->size,
xldata->offnumPostfix);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
}
}
Example 15: spgRedoAddNode
static void
spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
{
char *ptr = XLogRecGetData(record);
spgxlogAddNode *xldata = (spgxlogAddNode *) ptr;
SpGistInnerTuple innerTuple;
SpGistState state;
Buffer buffer;
Page page;
int bbi;
/* we assume this is adequately aligned */
ptr += sizeof(spgxlogAddNode);
innerTuple = (SpGistInnerTuple) ptr;
fillFakeState(&state, xldata->stateSrc);
if (xldata->blknoNew == InvalidBlockNumber)
{
/* update in place */
Assert(xldata->blknoParent == InvalidBlockNumber);
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) innerTuple, innerTuple->size,
xldata->offnum,
false, false) != xldata->offnum)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
innerTuple->size);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
}
}
else
{
/* Install new tuple first so redirect is valid */
if (!(record->xl_info & XLR_BKP_BLOCK_2))
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoNew,
xldata->newPage);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
addOrReplaceTuple(page, (Item) innerTuple,
innerTuple->size, xldata->offnumNew);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
}
/* Delete old tuple, replacing it with redirect or placeholder tuple */
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
SpGistDeadTuple dt;
if (state.isBuild)
dt = spgFormDeadTuple(&state, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
InvalidOffsetNumber);
else
dt = spgFormDeadTuple(&state, SPGIST_REDIRECT,
xldata->blknoNew,
xldata->offnumNew);
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) dt, dt->size,
xldata->offnum,
false, false) != xldata->offnum)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
dt->size);
if (state.isBuild)
SpGistPageGetOpaque(page)->nPlaceholder++;
//......... part of the code omitted here .........