This article collects typical usage examples of the BufferGetBlockNumber function in C/C++; the examples below are all drawn from the PostgreSQL source tree. If you have been wondering exactly what BufferGetBlockNumber does, how to call it, or what real-world usage looks like, the hand-picked code examples here should help.
15 code examples of BufferGetBlockNumber are presented below, sorted by popularity by default.
Example 1: spgbuild
/*
* Build an SP-GiST index.
*/
Datum
spgbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
SpGistBuildState buildstate;
Buffer metabuffer,
rootbuffer;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/*
* Initialize the meta page and root page
*/
metabuffer = SpGistNewBuffer(index);
rootbuffer = SpGistNewBuffer(index);
Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_HEAD_BLKNO);
START_CRIT_SECTION();
SpGistInitMetapage(BufferGetPage(metabuffer));
MarkBufferDirty(metabuffer);
SpGistInitBuffer(rootbuffer, SPGIST_LEAF);
MarkBufferDirty(rootbuffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata;
/* WAL data is just the relfilenode */
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, &rdata);
PageSetLSN(BufferGetPage(metabuffer), recptr);
PageSetTLI(BufferGetPage(metabuffer), ThisTimeLineID);
PageSetLSN(BufferGetPage(rootbuffer), recptr);
PageSetTLI(BufferGetPage(rootbuffer), ThisTimeLineID);
}
END_CRIT_SECTION();
UnlockReleaseBuffer(metabuffer);
UnlockReleaseBuffer(rootbuffer);
/*
* Now insert all the heap data into the index
*/
initSpGistState(&buildstate.spgstate, index);
buildstate.spgstate.isBuild = true;
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"SP-GiST build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
spgistBuildCallback, (void *) &buildstate);
MemoryContextDelete(buildstate.tmpCtx);
SpGistUpdateMetaPage(index);
result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult));
result->heap_tuples = result->index_tuples = reltuples;
PG_RETURN_POINTER(result);
}
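A note on the pair of Asserts above: pages of a brand-new index are allocated sequentially, so the first two buffers obtained from the allocator must sit at block 0 (SPGIST_METAPAGE_BLKNO) and block 1 (SPGIST_HEAD_BLKNO). Below is a minimal sketch of the same check with literal block numbers; check_fixed_pages is a hypothetical helper, not part of the SP-GiST source.
/*
 * Minimal sketch (hypothetical helper): in a freshly created index, the
 * first two allocated buffers must land on block 0 (metapage) and block 1
 * (root page). BufferGetBlockNumber just reads the tag of a pinned buffer.
 */
#include "postgres.h"
#include "storage/bufmgr.h"

static void
check_fixed_pages(Buffer metabuffer, Buffer rootbuffer)
{
	if (BufferGetBlockNumber(metabuffer) != 0 ||
		BufferGetBlockNumber(rootbuffer) != 1)
		elog(ERROR, "fixed index pages are not at the expected blocks");
}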
Example 2: gistXLogSplit
/*
* Write WAL record of a page split.
*/
XLogRecPtr
gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,
SplitedPageLayout *dist,
BlockNumber origrlink, GistNSN orignsn,
Buffer leftchildbuf)
{
XLogRecData *rdata;
gistxlogPageSplit xlrec;
SplitedPageLayout *ptr;
int npage = 0,
cur;
XLogRecPtr recptr;
for (ptr = dist; ptr; ptr = ptr->next)
npage++;
rdata = (XLogRecData *) palloc(sizeof(XLogRecData) * (npage * 2 + 2));
xlrec.node = node;
xlrec.origblkno = blkno;
xlrec.origrlink = origrlink;
xlrec.orignsn = orignsn;
xlrec.origleaf = page_is_leaf;
xlrec.npage = (uint16) npage;
xlrec.leftchild =
BufferIsValid(leftchildbuf) ? BufferGetBlockNumber(leftchildbuf) : InvalidBlockNumber;
rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(gistxlogPageSplit);
rdata[0].buffer = InvalidBuffer;
cur = 1;
/*
* Include a full page image of the child buf. (only necessary if a
* checkpoint happened since the child page was split)
*/
if (BufferIsValid(leftchildbuf))
{
rdata[cur - 1].next = &(rdata[cur]);
rdata[cur].data = NULL;
rdata[cur].len = 0;
rdata[cur].buffer = leftchildbuf;
rdata[cur].buffer_std = true;
cur++;
}
for (ptr = dist; ptr; ptr = ptr->next)
{
rdata[cur - 1].next = &(rdata[cur]);
rdata[cur].buffer = InvalidBuffer;
rdata[cur].data = (char *) &(ptr->block);
rdata[cur].len = sizeof(gistxlogPage);
cur++;
rdata[cur - 1].next = &(rdata[cur]);
rdata[cur].buffer = InvalidBuffer;
rdata[cur].data = (char *) (ptr->list);
rdata[cur].len = ptr->lenlist;
cur++;
}
rdata[cur - 1].next = NULL;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT, rdata);
pfree(rdata);
return recptr;
}
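Note how xlrec.leftchild is filled: a WAL record can store only a block number, never a buffer, and here the buffer may legitimately be absent. BufferIsValid(...) ? BufferGetBlockNumber(...) : InvalidBlockNumber is the standard idiom for that translation. Isolated as a sketch (optional_buffer_blkno is a hypothetical name):
/*
 * Sketch of the idiom used for xlrec.leftchild above: map an optional
 * buffer to a block number, with InvalidBlockNumber as the "absent" marker.
 */
#include "postgres.h"
#include "storage/block.h"
#include "storage/bufmgr.h"

static BlockNumber
optional_buffer_blkno(Buffer buf)
{
	return BufferIsValid(buf) ? BufferGetBlockNumber(buf) : InvalidBlockNumber;
}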
Example 3: btvacuumpage
//......... part of the code omitted .........
/*
* During Hot Standby we currently assume that
* XLOG_BTREE_VACUUM records do not produce conflicts. That is
* only true as long as the callback function depends only
* upon whether the index tuple refers to heap tuples removed
* in the initial heap scan. When vacuum starts it derives a
* value of OldestXmin. Backends taking later snapshots could
* have a RecentGlobalXmin with a later xid than the vacuum's
* OldestXmin, so it is possible that row versions deleted
* after OldestXmin could be marked as killed by other
* backends. The callback function *could* look at the index
* tuple state in isolation and decide to delete the index
* tuple, though currently it does not. If it ever did, we
* would need to reconsider whether XLOG_BTREE_VACUUM records
* should cause conflicts. If they did cause conflicts they
* would be fairly harsh conflicts, since we haven't yet
* worked out a way to pass a useful value for
* latestRemovedXid on the XLOG_BTREE_VACUUM records. This
* applies to *any* type of index that marks index tuples as
* killed.
*/
if (callback(htup, callback_state))
deletable[ndeletable++] = offnum;
}
}
/*
* Apply any needed deletes. We issue just one _bt_delitems() call
* per page, so as to minimize WAL traffic.
*/
if (ndeletable > 0)
{
BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
_bt_delitems_vacuum(rel, buf, deletable, ndeletable, vstate->lastBlockVacuumed);
/*
* Keep track of the block number of the lastBlockVacuumed, so we
* can scan those blocks as well during WAL replay. This then
* provides concurrency protection and allows btrees to be used
* while in recovery.
*/
if (lastBlockVacuumed > vstate->lastBlockVacuumed)
vstate->lastBlockVacuumed = lastBlockVacuumed;
stats->tuples_removed += ndeletable;
/* must recompute maxoff */
maxoff = PageGetMaxOffsetNumber(page);
}
else
{
/*
* If the page has been split during this vacuum cycle, it seems
* worth expending a write to clear btpo_cycleid even if we don't
* have any deletions to do. (If we do, _bt_delitems takes care
* of this.) This ensures we won't process the page again.
*
* We treat this like a hint-bit update because there's no need to
* WAL-log it.
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid)
{
opaque->btpo_cycleid = 0;
SetBufferCommitInfoNeedsSave(buf);
//......... part of the code omitted .........
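The lastBlockVacuumed bookkeeping above reduces to keeping a running maximum of the block numbers visited, so that WAL replay on a standby can rescan the same range. Just that step, as a sketch (remember_vacuumed_block is a hypothetical helper):
/*
 * Sketch (hypothetical helper) of the bookkeeping above: track the highest
 * block number vacuumed so far.
 */
#include "postgres.h"
#include "storage/bufmgr.h"

static void
remember_vacuumed_block(Buffer buf, BlockNumber *lastBlockVacuumed)
{
	BlockNumber blkno = BufferGetBlockNumber(buf);

	if (blkno > *lastBlockVacuumed)
		*lastBlockVacuumed = blkno;
}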
Example 4: _bt_pagedel
/*
* _bt_pagedel() -- Delete a page from the b-tree, if legal to do so.
*
* This action unlinks the page from the b-tree structure, removing all
* pointers leading to it --- but not touching its own left and right links.
* The page cannot be physically reclaimed right away, since other processes
* may currently be trying to follow links leading to the page; they have to
* be allowed to use its right-link to recover. See nbtree/README.
*
* On entry, the target buffer must be pinned and locked (either read or write
* lock is OK). This lock and pin will be dropped before exiting.
*
* The "stack" argument can be a search stack leading (approximately) to the
* target page, or NULL --- outside callers typically pass NULL since they
* have not done such a search, but internal recursion cases pass the stack
* to avoid duplicated search effort.
*
* Returns the number of pages successfully deleted (zero if page cannot
* be deleted now; could be more than one if parent pages were deleted too).
*
* NOTE: this leaks memory. Rather than trying to clean up everything
* carefully, it's better to run it in a temp context that can be reset
* frequently.
*/
int
_bt_pagedel(Relation rel, Buffer buf, BTStack stack)
{
int result;
BlockNumber target,
leftsib,
rightsib,
parent;
OffsetNumber poffset,
maxoff;
uint32 targetlevel,
ilevel;
ItemId itemid;
IndexTuple targetkey,
itup;
ScanKey itup_scankey;
Buffer lbuf,
rbuf,
pbuf;
bool parent_half_dead;
bool parent_one_child;
bool rightsib_empty;
Buffer metabuf = InvalidBuffer;
Page metapg = NULL;
BTMetaPageData *metad = NULL;
Page page;
BTPageOpaque opaque;
/*
* We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
/* Should never fail to delete a half-dead page */
Assert(!P_ISHALFDEAD(opaque));
_bt_relbuf(rel, buf);
return 0;
}
/*
* Save info about page, including a copy of its high key (it must have
* one, being non-rightmost).
*/
target = BufferGetBlockNumber(buf);
targetlevel = opaque->btpo.level;
leftsib = opaque->btpo_prev;
itemid = PageGetItemId(page, P_HIKEY);
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
/*
* To avoid deadlocks, we'd better drop the target page lock before going
* further.
*/
_bt_relbuf(rel, buf);
/*
* We need an approximate pointer to the page's parent page. We use the
* standard search mechanism to search for the page's high key; this will
* give us a link to either the current parent or someplace to its left
* (if there are multiple equal high keys). In recursion cases, the
* caller already generated a search stack and we can just re-use that
* work.
*/
if (stack == NULL)
{
if (!InRecovery)
{
/* we need an insertion scan key to do our search, so build one */
itup_scankey = _bt_mkscankey(rel, targetkey);
/* find the leftmost leaf page containing this key */
stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey, false,
//......... part of the code omitted .........
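A detail that is easy to miss above: target = BufferGetBlockNumber(buf) is captured before _bt_relbuf drops the lock and pin, because a block number can only be read from a valid, pinned buffer. A condensed sketch of the pattern (save_and_release is hypothetical and uses the generic UnlockReleaseBuffer in place of the nbtree wrapper):
/*
 * Sketch of the "save, then release" pattern: read the block number while
 * the buffer is still pinned; afterwards only the saved value may be used.
 */
#include "postgres.h"
#include "storage/bufmgr.h"

static BlockNumber
save_and_release(Buffer buf)
{
	BlockNumber target = BufferGetBlockNumber(buf);	/* read while pinned */

	UnlockReleaseBuffer(buf);	/* from here on, use only "target" */
	return target;
}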
Example 5: _bt_getroot
/*
* _bt_getroot() -- Get the root page of the btree.
*
* Since the root page can move around the btree file, we have to read
* its location from the metadata page, and then read the root page
* itself. If no root page exists yet, we have to create one. The
* standard class of race conditions exists here; I think I covered
* them all in the Hopi Indian rain dance of lock requests below.
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
* and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
*
* The returned page is not necessarily the true root --- it could be
* a "fast root" (a page that is alone in its level due to deletions).
* Also, if the root page is split while we are "in flight" to it,
* what we will return is the old root, which is now just the leftmost
* page on a probably-not-very-wide level. For most purposes this is
* as good as or better than the true root, so we do not bother to
* insist on finding the true root. We do, however, guarantee to
* return a live (not deleted or half-dead) page.
*
* On successful return, the root page is pinned and read-locked.
* The metadata page is not locked or pinned on exit.
*/
Buffer
_bt_getroot(Relation rel, int access)
{
Buffer metabuf;
Page metapg;
BTPageOpaque metaopaque;
Buffer rootbuf;
Page rootpage;
BTPageOpaque rootopaque;
BlockNumber rootblkno;
uint32 rootlevel;
BTMetaPageData *metad;
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
metapg = BufferGetPage(metabuf);
metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
metad = BTPageGetMeta(metapg);
/* sanity-check the metapage */
if (!(metaopaque->btpo_flags & BTP_META) ||
metad->btm_magic != BTREE_MAGIC)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" is not a btree",
RelationGetRelationName(rel))));
if (metad->btm_version != BTREE_VERSION)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("version mismatch in index \"%s\": file version %d, code version %d",
RelationGetRelationName(rel),
metad->btm_version, BTREE_VERSION)));
/* if no root page initialized yet, do it */
if (metad->btm_root == P_NONE)
{
/* If access = BT_READ, caller doesn't want us to create root yet */
if (access == BT_READ)
{
_bt_relbuf(rel, metabuf);
return InvalidBuffer;
}
/* trade in our read lock for a write lock */
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
LockBuffer(metabuf, BT_WRITE);
/*
* Race condition: if someone else initialized the metadata between
* the time we released the read lock and acquired the write lock, we
* must avoid doing it again.
*/
if (metad->btm_root != P_NONE)
{
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
* over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
return _bt_getroot(rel, access);
}
/*
* Get, initialize, write, and leave a lock of the appropriate type on
* the new root page. Since this is the first page in the tree, it's
* a leaf as well as the root.
*/
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
rootblkno = BufferGetBlockNumber(rootbuf);
rootpage = BufferGetPage(rootbuf);
//......... part of the code omitted .........
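The P_NEW + BufferGetBlockNumber pairing above is how backend code learns where a newly extended page ended up: P_NEW asks the buffer manager to extend the relation, and only the returned buffer reveals which block was chosen. A generic sketch (extend_relation_one_page is hypothetical and uses plain bufmgr calls rather than _bt_getbuf):
/*
 * Sketch: extend the relation by one page and report its block number,
 * the same way rootblkno is obtained above. Page initialization omitted.
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

static BlockNumber
extend_relation_one_page(Relation rel)
{
	Buffer		buf = ReadBuffer(rel, P_NEW);
	BlockNumber blkno;

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	blkno = BufferGetBlockNumber(buf);	/* block chosen by the extension */
	UnlockReleaseBuffer(buf);
	return blkno;
}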
Example 6: gistbuild
/*
* Routine to build an index. Basically calls insert over and over.
*
* XXX: it would be nice to implement some sort of bulk-loading
* algorithm, but it is not clear how to do that.
*/
Datum
gistbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
GISTBuildState buildstate;
Buffer buffer;
Page page;
/*
* We expect to be called exactly once for any index relation. If that's
* not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* no locking is needed */
initGISTstate(&buildstate.giststate, index);
/* initialize the root page */
buffer = gistNewBuffer(index);
Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
page = BufferGetPage(buffer);
START_CRIT_SECTION();
GISTInitBuffer(buffer, F_LEAF);
MarkBufferDirty(buffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata;
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX, &rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
else
PageSetLSN(page, GetXLogRecPtrForTemp());
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
/* build the index */
buildstate.numindexattrs = indexInfo->ii_NumIndexAttrs;
buildstate.indtuples = 0;
/*
* create a temporary memory context that is reset once for each tuple
* inserted into the index
*/
buildstate.tmpCtx = createTempGistContext();
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
gistbuildCallback, (void *) &buildstate);
/* okay, all heap tuples are indexed */
MemoryContextDelete(buildstate.tmpCtx);
freeGISTstate(&buildstate.giststate);
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
PG_RETURN_POINTER(result);
}
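Like spgbuild in Example 1, gistbuild opens with a sanity check that the relation being built is physically empty. As a standalone sketch (ensure_index_is_empty is a hypothetical name):
/*
 * Sketch of the guard at the top of both build functions: an index being
 * (re)built must have zero blocks on disk.
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

static void
ensure_index_is_empty(Relation index)
{
	if (RelationGetNumberOfBlocks(index) != 0)
		elog(ERROR, "index \"%s\" already contains data",
			 RelationGetRelationName(index));
}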
Example 7: _bt_delitems_delete
void
_bt_delitems_delete(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, Relation heapRel)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
Assert(nitems > 0);
/* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/* Fix the page */
PageIndexMultiDelete(page, itemnos, nitems);
/*
* We can clear the vacuum cycle ID since this page has certainly been
* processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DEAD items. This is not
* certainly true (there might be some that have recently been marked, but
* weren't included in our target-item list), but it will almost always be
* true and it doesn't seem worth an additional page scan to check it.
* Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
MarkBufferDirty(buf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
XLogRecData rdata[3];
xl_btree_delete xlrec_delete;
xlrec_delete.node = rel->rd_node;
xlrec_delete.hnode = heapRel->rd_node;
xlrec_delete.block = BufferGetBlockNumber(buf);
xlrec_delete.nitems = nitems;
rdata[0].data = (char *) &xlrec_delete;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &(rdata[1]);
/*
* We need the target-offsets array whether or not we store the whole
* buffer, to allow us to find the latestRemovedXid on a standby server.
*/
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
rdata[1].buffer = InvalidBuffer;
rdata[1].next = &(rdata[2]);
rdata[2].data = NULL;
rdata[2].len = 0;
rdata[2].buffer = buf;
rdata[2].buffer_std = true;
rdata[2].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
}
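The final rdata entry above carries no payload of its own (data = NULL, len = 0); it only associates the record with buf, allowing XLogInsert to substitute a full-page image if the page was first modified after the latest checkpoint. A sketch of that convention, assuming the pre-9.5 XLogRecData interface used throughout these examples (add_buffer_reference is a hypothetical helper):
/*
 * Sketch (hypothetical helper, pre-9.5 WAL interface): register a buffer
 * with a WAL record without contributing any record payload.
 */
#include "postgres.h"
#include "access/xlog.h"
#include "storage/bufmgr.h"

static void
add_buffer_reference(XLogRecData *rd, Buffer buf)
{
	rd->data = NULL;			/* no payload of its own */
	rd->len = 0;
	rd->buffer = buf;			/* the page this record modifies */
	rd->buffer_std = true;		/* page uses the standard layout */
	rd->next = NULL;
}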
Example 8: _bt_endpoint
/*
* _bt_endpoint() -- Find the first or last page in the index, and scan
* from there to the first key satisfying all the quals.
*
* This is used by _bt_first() to set up a scan when we've determined
* that the scan must start at the beginning or end of the index (for
* a forward or backward scan respectively). Exit conditions are the
* same as for _bt_first().
*/
static bool
_bt_endpoint(IndexScanDesc scan, ScanDirection dir)
{
Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Buffer buf;
Page page;
BTPageOpaque opaque;
OffsetNumber start;
BTScanPosItem *currItem;
/*
* Scan down to the leftmost or rightmost leaf page. This is a simplified
* version of _bt_search(). We don't maintain a stack since we know we
* won't need it.
*/
buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));
if (!BufferIsValid(buf))
{
/*
* Empty index. Lock the whole relation, as nothing finer to lock
* exists.
*/
PredicateLockRelation(rel, scan->xs_snapshot);
so->currPos.buf = InvalidBuffer;
return false;
}
PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
Assert(P_ISLEAF(opaque));
if (ScanDirectionIsForward(dir))
{
/* There could be dead pages to the left, so not this: */
/* Assert(P_LEFTMOST(opaque)); */
start = P_FIRSTDATAKEY(opaque);
}
else if (ScanDirectionIsBackward(dir))
{
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
}
else
{
elog(ERROR, "invalid scan direction: %d", (int) dir);
start = 0; /* keep compiler quiet */
}
/* remember which buffer we have pinned */
so->currPos.buf = buf;
/* initialize moreLeft/moreRight appropriately for scan direction */
if (ScanDirectionIsForward(dir))
{
so->currPos.moreLeft = false;
so->currPos.moreRight = true;
}
else
{
so->currPos.moreLeft = true;
so->currPos.moreRight = false;
}
so->numKilled = 0; /* just paranoia */
so->markItemIndex = -1; /* ditto */
/*
* Now load data from the first page of the scan.
*/
if (!_bt_readpage(scan, dir, start))
{
/*
* There's no actually-matching data on this page. Try to advance to
* the next page. Return false if there's no matching data at all.
*/
if (!_bt_steppage(scan, dir))
return false;
}
/* Drop the lock, but not pin, on the current page */
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
/* OK, itemIndex says what to return */
currItem = &so->currPos.items[so->currPos.itemIndex];
scan->xs_ctup.t_self = currItem->heapTid;
if (scan->xs_want_itup)
scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
//......... part of the code omitted .........
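PredicateLockPage identifies a page by (relation, block number), which is why the pinned buffer is translated with BufferGetBlockNumber in the call above. As a sketch (predicate_lock_buffer is a hypothetical wrapper):
/*
 * Sketch (hypothetical wrapper): take an SSI predicate lock on the page a
 * pinned buffer holds.
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/predicate.h"
#include "utils/snapshot.h"

static void
predicate_lock_buffer(Relation rel, Buffer buf, Snapshot snapshot)
{
	PredicateLockPage(rel, BufferGetBlockNumber(buf), snapshot);
}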
Example 9: _bt_first
//......... part of the code omitted .........
* Find first item > scankey. (This is only used for forward
* scans.)
*/
nextkey = true;
goback = false;
break;
default:
/* can't get here, but keep compiler quiet */
elog(ERROR, "unrecognized strat_total: %d", (int) strat_total);
return false;
}
/*
* Use the manufactured insertion scan key to descend the tree and
* position ourselves on the target leaf page.
*/
stack = _bt_search(rel, keysCount, scankeys, nextkey, &buf, BT_READ);
/* don't need to keep the stack around... */
_bt_freestack(stack);
/* remember which buffer we have pinned, if any */
so->currPos.buf = buf;
if (!BufferIsValid(buf))
{
/*
* We only get here if the index is completely empty. Lock relation
* because nothing finer to lock exists.
*/
PredicateLockRelation(rel, scan->xs_snapshot);
return false;
}
else
PredicateLockPage(rel, BufferGetBlockNumber(buf),
scan->xs_snapshot);
/* initialize moreLeft/moreRight appropriately for scan direction */
if (ScanDirectionIsForward(dir))
{
so->currPos.moreLeft = false;
so->currPos.moreRight = true;
}
else
{
so->currPos.moreLeft = true;
so->currPos.moreRight = false;
}
so->numKilled = 0; /* just paranoia */
so->markItemIndex = -1; /* ditto */
/* position to the precise item on the page */
offnum = _bt_binsrch(rel, buf, keysCount, scankeys, nextkey);
/*
* If nextkey = false, we are positioned at the first item >= scan key, or
* possibly at the end of a page on which all the existing items are less
* than the scan key and we know that everything on later pages is greater
* than or equal to scan key.
*
* If nextkey = true, we are positioned at the first item > scan key, or
* possibly at the end of a page on which all the existing items are less
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
* the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
*/
if (goback)
offnum = OffsetNumberPrev(offnum);
/*
* Now load data from the first page of the scan.
*/
if (!_bt_readpage(scan, dir, offnum))
{
/*
* There's no actually-matching data on this page. Try to advance to
* the next page. Return false if there's no matching data at all.
*/
if (!_bt_steppage(scan, dir))
return false;
}
/* Drop the lock, but not pin, on the current page */
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
/* OK, itemIndex says what to return */
currItem = &so->currPos.items[so->currPos.itemIndex];
scan->xs_ctup.t_self = currItem->heapTid;
if (scan->xs_want_itup)
scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
return true;
}
Example 10: _bt_steppage
//......... part of the code omitted .........
{
/* bump pin on current buffer for assignment to mark buffer */
IncrBufferRefCount(so->currPos.buf);
memcpy(&so->markPos, &so->currPos,
offsetof(BTScanPosData, items[1]) +
so->currPos.lastItem * sizeof(BTScanPosItem));
if (so->markTuples)
memcpy(so->markTuples, so->currTuples,
so->currPos.nextTupleOffset);
so->markPos.itemIndex = so->markItemIndex;
so->markItemIndex = -1;
}
rel = scan->indexRelation;
if (ScanDirectionIsForward(dir))
{
/* Walk right to the next page with data */
/* We must rely on the previously saved nextPage link! */
BlockNumber blkno = so->currPos.nextPage;
/* Remember we left a page with data */
so->currPos.moreLeft = true;
for (;;)
{
/* release the previous buffer */
_bt_relbuf(rel, so->currPos.buf);
so->currPos.buf = InvalidBuffer;
/* if we're at end of scan, give up */
if (blkno == P_NONE || !so->currPos.moreRight)
return false;
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
/* step right one page */
so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
/* check for deleted page */
page = BufferGetPage(so->currPos.buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!P_IGNORE(opaque))
{
PredicateLockPage(rel, blkno, scan->xs_snapshot);
/* see if there are any matches on this page */
/* note that this will clear moreRight if we can stop */
if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque)))
break;
}
/* nope, keep going */
blkno = opaque->btpo_next;
}
}
else
{
/* Remember we left a page with data */
so->currPos.moreRight = true;
/*
* Walk left to the next page with data. This is much more complex
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
* it. See nbtree/README for details.
*/
for (;;)
{
/* Done if we know there are no matching keys to the left */
if (!so->currPos.moreLeft)
{
_bt_relbuf(rel, so->currPos.buf);
so->currPos.buf = InvalidBuffer;
return false;
}
/* Step to next physical page */
so->currPos.buf = _bt_walk_left(rel, so->currPos.buf);
/* if we're physically at end of index, return failure */
if (so->currPos.buf == InvalidBuffer)
return false;
/*
* Okay, we managed to move left to a non-deleted page. Done if
* it's not half-dead and contains matching tuples. Else loop back
* and do it all again.
*/
page = BufferGetPage(so->currPos.buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!P_IGNORE(opaque))
{
PredicateLockPage(rel, BufferGetBlockNumber(so->currPos.buf), scan->xs_snapshot);
/* see if there are any matches on this page */
/* note that this will clear moreLeft if we can stop */
if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page)))
break;
}
}
}
return true;
}
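The forward direction above is the simpler half of the function: follow btpo_next links, skipping dead or half-dead pages, until a live page turns up or the chain ends at P_NONE. A condensed sketch of just that walk (walk_right_to_live_page is hypothetical; on success it returns the buffer still pinned and read-locked):
/*
 * Condensed sketch of the walk-right loop above, using the real nbtree
 * helpers; interrupt checks and predicate locking are omitted.
 */
#include "postgres.h"
#include "access/nbtree.h"
#include "storage/bufmgr.h"

static Buffer
walk_right_to_live_page(Relation rel, BlockNumber blkno)
{
	for (;;)
	{
		Buffer		buf;
		Page		page;
		BTPageOpaque opaque;

		if (blkno == P_NONE)
			return InvalidBuffer;	/* end of the scan */
		buf = _bt_getbuf(rel, blkno, BT_READ);
		page = BufferGetPage(buf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		if (!P_IGNORE(opaque))
			return buf;				/* live page, still pinned and locked */
		blkno = opaque->btpo_next;	/* read the link before releasing */
		_bt_relbuf(rel, buf);
	}
}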
Example 11: _bt_walk_left
/*
* _bt_walk_left() -- step left one page, if possible
*
* The given buffer must be pinned and read-locked. This will be dropped
* before stepping left. On return, we have pin and read lock on the
* returned page, instead.
*
* Returns InvalidBuffer if there is no page to the left (no lock is held
* in that case).
*
* When working on a non-leaf level, it is possible for the returned page
* to be half-dead; the caller should check that condition and step left
* again if it's important.
*/
static Buffer
_bt_walk_left(Relation rel, Buffer buf)
{
Page page;
BTPageOpaque opaque;
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
for (;;)
{
BlockNumber obknum;
BlockNumber lblkno;
BlockNumber blkno;
int tries;
/* if we're at end of tree, release buf and return failure */
if (P_LEFTMOST(opaque))
{
_bt_relbuf(rel, buf);
break;
}
/* remember original page we are stepping left from */
obknum = BufferGetBlockNumber(buf);
/* step left */
blkno = lblkno = opaque->btpo_prev;
_bt_relbuf(rel, buf);
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* If this isn't the page we want, walk right till we find what we
* want --- but go no more than four hops (an arbitrary limit). If we
* don't find the correct page by then, the most likely bet is that
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
* because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
{
if (!P_ISDELETED(opaque) && opaque->btpo_next == obknum)
{
/* Found desired page, return it */
return buf;
}
if (P_RIGHTMOST(opaque) || ++tries > 4)
break;
blkno = opaque->btpo_next;
buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
}
/* Return to the original page to see what's up */
buf = _bt_relandgetbuf(rel, buf, obknum, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (P_ISDELETED(opaque))
{
/*
* It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
*/
for (;;)
{
if (P_RIGHTMOST(opaque))
elog(ERROR, "fell off the end of index \"%s\"",
RelationGetRelationName(rel));
blkno = opaque->btpo_next;
buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!P_ISDELETED(opaque))
break;
}
/*
//......... part of the code omitted .........
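The central trick above is saving obknum = BufferGetBlockNumber(buf) before the buffer is released: a candidate left sibling is accepted only if it is live and its right link points back at that saved block. As a standalone check (is_left_sibling_of is a hypothetical name):
/*
 * Sketch of the sibling test above: "candidate" must be pinned and locked;
 * obknum is the block we originally stepped left from.
 */
#include "postgres.h"
#include "access/nbtree.h"
#include "storage/bufmgr.h"

static bool
is_left_sibling_of(Buffer candidate, BlockNumber obknum)
{
	Page		page = BufferGetPage(candidate);
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	return !P_ISDELETED(opaque) && opaque->btpo_next == obknum;
}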
Example 12: _hash_freeovflpage
/*
* _hash_freeovflpage() -
*
* Remove this overflow page from its bucket's chain, and mark the page as
* free. On entry, ovflbuf is write-locked; it is released before exiting.
*
* Add the tuples (itups) to wbuf in this function. We could do that in the
* caller as well, but the advantage of doing it here is we can easily write
* the WAL for XLOG_HASH_SQUEEZE_PAGE operation. Addition of tuples and
* removal of overflow page has to be done as an atomic operation, otherwise
* during replay on standby users might find duplicate records.
*
* Since this function is invoked in VACUUM, we provide an access strategy
* parameter that controls fetches of the bucket pages.
*
* Returns the block number of the page that followed the given page
* in the bucket, or InvalidBlockNumber if no following page.
*
* NB: caller must not hold lock on metapage, nor on page, that's next to
* ovflbuf in the bucket chain. We don't acquire the lock on page that's
* prior to ovflbuf in chain if it is same as wbuf because the caller already
* has a lock on same.
*/
BlockNumber
_hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
Buffer wbuf, IndexTuple *itups, OffsetNumber *itup_offsets,
Size *tups_size, uint16 nitups,
BufferAccessStrategy bstrategy)
{
HashMetaPage metap;
Buffer metabuf;
Buffer mapbuf;
BlockNumber ovflblkno;
BlockNumber prevblkno;
BlockNumber blkno;
BlockNumber nextblkno;
BlockNumber writeblkno;
HashPageOpaque ovflopaque;
Page ovflpage;
Page mappage;
uint32 *freep;
uint32 ovflbitno;
int32 bitmappage,
bitmapbit;
Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
Buffer prevbuf = InvalidBuffer;
Buffer nextbuf = InvalidBuffer;
bool update_metap = false;
/* Get information from the doomed page */
_hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
ovflblkno = BufferGetBlockNumber(ovflbuf);
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
nextblkno = ovflopaque->hasho_nextblkno;
prevblkno = ovflopaque->hasho_prevblkno;
writeblkno = BufferGetBlockNumber(wbuf);
bucket = ovflopaque->hasho_bucket;
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must fix
* up the bucket chain members behind and ahead of the overflow page being
* deleted. Concurrency issues are avoided by using lock chaining as
* described atop hashbucketcleanup.
*/
if (BlockNumberIsValid(prevblkno))
{
if (prevblkno == writeblkno)
prevbuf = wbuf;
else
prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
}
if (BlockNumberIsValid(nextblkno))
nextbuf = _hash_getbuf_with_strategy(rel,
nextblkno,
HASH_WRITE,
LH_OVERFLOW_PAGE,
bstrategy);
/* Note: bstrategy is intentionally not used for metapage and bitmap */
/* Read the metapage so we can determine which bitmap page to use */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
/* Identify which bit to set */
ovflbitno = _hash_ovflblkno_to_bitno(metap, ovflblkno);
bitmappage = ovflbitno >> BMPG_SHIFT(metap);
bitmapbit = ovflbitno & BMPG_MASK(metap);
if (bitmappage >= metap->hashm_nmaps)
elog(ERROR, "invalid overflow bit number %u", ovflbitno);
blkno = metap->hashm_mapp[bitmappage];
/* Release metapage lock while we access the bitmap page */
//......... part of the code omitted .........
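The bitmap arithmetic above maps an overflow page's bit number to a bitmap page plus a bit within that page. Isolated as a sketch (locate_ovfl_bit is hypothetical; BMPG_SHIFT and BMPG_MASK are the real macros from access/hash.h, parameterized by the metapage):
/*
 * Sketch of the bit arithmetic above: split an overflow bit number into
 * (bitmap page index, bit offset within that page).
 */
#include "postgres.h"
#include "access/hash.h"

static void
locate_ovfl_bit(HashMetaPage metap, uint32 ovflbitno,
				int32 *bitmappage, int32 *bitmapbit)
{
	*bitmappage = ovflbitno >> BMPG_SHIFT(metap);	/* which bitmap page */
	*bitmapbit = ovflbitno & BMPG_MASK(metap);		/* which bit inside it */
}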
Example 13: _hash_addovflpage
//......... part of the code omitted .........
* and other on new overflow page) since there cannot be anyone else
* contending for access to ovflbuf.
*/
ovflbuf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM);
found:
/*
* Do the update. No ereport(ERROR) until changes are logged. We want to
* log the changes for bitmap page and overflow page together to avoid
* loss of pages in case the new page is added.
*/
START_CRIT_SECTION();
if (page_found)
{
Assert(BufferIsValid(mapbuf));
/* mark page "in use" in the bitmap */
SETBIT(freep, bitmap_page_bit);
MarkBufferDirty(mapbuf);
}
else
{
/* update the count to indicate new overflow page is added */
metap->hashm_spares[splitnum]++;
if (BufferIsValid(newmapbuf))
{
_hash_initbitmapbuffer(newmapbuf, metap->hashm_bmsize, false);
MarkBufferDirty(newmapbuf);
/* add the new bitmap page to the metapage's list of bitmaps */
metap->hashm_mapp[metap->hashm_nmaps] = BufferGetBlockNumber(newmapbuf);
metap->hashm_nmaps++;
metap->hashm_spares[splitnum]++;
}
MarkBufferDirty(metabuf);
/*
* for new overflow page, we don't need to explicitly set the bit in
* bitmap page, as by default that will be set to "in use".
*/
}
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
metap->hashm_firstfree = bit + 1;
MarkBufferDirty(metabuf);
}
/* initialize new overflow page */
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
ovflopaque->hasho_nextblkno = InvalidBlockNumber;
ovflopaque->hasho_bucket = pageopaque->hasho_bucket;
ovflopaque->hasho_flag = LH_OVERFLOW_PAGE;
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
MarkBufferDirty(ovflbuf);
//......... part of the code omitted .........
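The opaque-area initialization above is the moment the new overflow page gets linked into the bucket chain: hasho_prevblkno is taken from the still-pinned predecessor buffer via BufferGetBlockNumber. A condensed sketch (init_ovfl_opaque is a hypothetical helper):
/*
 * Sketch (hypothetical helper): initialize a new overflow page's opaque
 * area, linking it after the current chain tail "tailbuf".
 */
#include "postgres.h"
#include "access/hash.h"
#include "storage/bufmgr.h"

static void
init_ovfl_opaque(Buffer ovflbuf, Buffer tailbuf, Bucket bucket)
{
	Page		ovflpage = BufferGetPage(ovflbuf);
	HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);

	opaque->hasho_prevblkno = BufferGetBlockNumber(tailbuf);
	opaque->hasho_nextblkno = InvalidBlockNumber;	/* new end of chain */
	opaque->hasho_bucket = bucket;
	opaque->hasho_flag = LH_OVERFLOW_PAGE;
	opaque->hasho_page_id = HASHO_PAGE_ID;
}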
Example 14: heap_prune_chain
//......... part of the code omitted .........
case HEAPTUPLE_LIVE:
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* If we wanted to optimize for aborts, we might consider
* marking the page prunable when we see INSERT_IN_PROGRESS.
* But we don't. See related decisions about when to mark the
* page prunable in heapam.c.
*/
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
break;
}
/*
* Remember the last DEAD tuple seen. We will advance past
* RECENTLY_DEAD tuples just in case there's a DEAD one after them;
* but we can't advance past anything else. (XXX is it really worth
* continuing to scan beyond RECENTLY_DEAD? The case where we will
* find another DEAD tuple is a fairly unusual corner case.)
*/
if (tupdead)
{
latestdead = offnum;
HeapTupleHeaderAdvanceLatestRemovedXid(htup,
&prstate->latestRemovedXid);
}
else if (!recent_dead)
break;
/*
* If the tuple is not HOT-updated, then we are at the end of this
* HOT-update chain.
*/
if (!HeapTupleHeaderIsHotUpdated(htup))
break;
/*
* Advance to next chain member.
*/
Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
BufferGetBlockNumber(buffer));
offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
priorXmax = HeapTupleHeaderGetUpdateXid(htup);
}
/*
* If we found a DEAD tuple in the chain, adjust the HOT chain so that all
* the DEAD tuples at the start of the chain are removed and the root line
* pointer is appropriately redirected.
*/
if (OffsetNumberIsValid(latestdead))
{
/*
* Mark as unused each intermediate item that we are able to remove
* from the chain.
*
* When the previous item is the last dead tuple seen, we are at the
* right candidate for redirection.
*/
for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
{
heap_prune_record_unused(prstate, chainitems[i]);
ndeleted++;
}
/*
* If the root entry had been a normal tuple, we are deleting it, so
* count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
ndeleted++;
/*
* If the DEAD tuple is at the end of the chain, the entire chain is
* dead and the root line pointer can be marked dead. Otherwise just
* redirect the root to the correct chain member.
*/
if (i >= nchain)
heap_prune_record_dead(prstate, rootoffnum);
else
heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
}
else if (nchain < 2 && ItemIdIsRedirected(rootlp))
{
/*
* We found a redirect item that doesn't point to a valid follow-on
* item. This can happen if the loop in heap_page_prune caused us to
* visit the dead successor of a redirect item before visiting the
* redirect item. We can clean up by setting the redirect item to
* DEAD state.
*/
heap_prune_record_dead(prstate, rootoffnum);
}
return ndeleted;
}
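The Assert inside the chain-walking loop encodes the basic HOT invariant: an update chain never leaves its page, so every t_ctid link must resolve to the block the buffer holds. As a standalone predicate (ctid_is_on_buffer_page is a hypothetical name):
/*
 * Sketch of the invariant asserted above: a HOT chain's t_ctid links stay
 * on the same block as the buffer being pruned.
 */
#include "postgres.h"
#include "access/htup_details.h"
#include "storage/bufmgr.h"

static bool
ctid_is_on_buffer_page(HeapTupleHeader htup, Buffer buffer)
{
	return ItemPointerGetBlockNumber(&htup->t_ctid) ==
		BufferGetBlockNumber(buffer);
}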
Example 15: gistplacetopage
static bool
gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
{
bool is_splitted = false;
bool is_leaf = (GistPageIsLeaf(state->stack->page)) ? true : false;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
/*
* if (!is_leaf) remove old key: This node's key has been modified, either
* because a child split occurred or because we needed to adjust our key
* for an insert in a child node. Therefore, remove the old version of
* this node's key.
*
* for WAL replay, in the non-split case we handle this by setting up a
* one-element todelete array; in the split case, it's handled implicitly
* because the tuple vector passed to gistSplit won't include this tuple.
*
* XXX: If we want to change fillfactors between node and leaf, fillfactor
* = (is_leaf ? state->leaf_fillfactor : state->node_fillfactor)
*/
if (gistnospace(state->stack->page, state->itup, state->ituplen,
is_leaf ? InvalidOffsetNumber : state->stack->childoffnum,
state->freespace))
{
/* no space for insertion */
IndexTuple *itvec;
int tlen;
SplitedPageLayout *dist = NULL,
*ptr;
BlockNumber rrlink = InvalidBlockNumber;
GistNSN oldnsn;
is_splitted = true;
/*
* Form index tuples vector to split: remove old tuple if it's needed
* and add new tuples to vector
*/
itvec = gistextractpage(state->stack->page, &tlen);
if (!is_leaf)
{
/* on inner page we should remove old tuple */
int pos = state->stack->childoffnum - FirstOffsetNumber;
tlen--;
if (pos != tlen)
memmove(itvec + pos, itvec + pos + 1, sizeof(IndexTuple) * (tlen - pos));
}
itvec = gistjoinvector(itvec, &tlen, state->itup, state->ituplen);
dist = gistSplit(state->r, state->stack->page, itvec, tlen, giststate);
state->itup = (IndexTuple *) palloc(sizeof(IndexTuple) * tlen);
state->ituplen = 0;
if (state->stack->blkno != GIST_ROOT_BLKNO)
{
/*
* if non-root split then we should not allocate new buffer, but
* we must create temporary page to operate
*/
dist->buffer = state->stack->buffer;
dist->page = PageGetTempPage(BufferGetPage(dist->buffer), sizeof(GISTPageOpaqueData));
/* clean all flags except F_LEAF */
GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
}
/* make new pages and fills them */
for (ptr = dist; ptr; ptr = ptr->next)
{
int i;
char *data;
/* get new page */
if (ptr->buffer == InvalidBuffer)
{
ptr->buffer = gistNewBuffer(state->r);
GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
ptr->page = BufferGetPage(ptr->buffer);
}
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
/*
* fill page, we can do it because all these pages are new
* (ie not linked in tree or masked by temp page)
*/
data = (char *) (ptr->list);
for (i = 0; i < ptr->block.num; i++)
{
if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(state->r));
data += IndexTupleSize((IndexTuple) data);
}
/* set up ItemPointer and remember it for parent */
ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
state->itup[state->ituplen] = ptr->itup;
state->ituplen++;
}
//......... part of the code omitted .........
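Worth highlighting in the page-filling loop above: once each split page has a buffer, its block number is copied to two places, the WAL page descriptor (ptr->block.blkno) and the t_tid of the index tuple that will become the parent's downlink. A condensed sketch (set_downlink_blkno is a hypothetical name):
/*
 * Sketch of the downlink fix-up above: propagate a split page's block
 * number into its WAL descriptor and its parent downlink tuple.
 */
#include "postgres.h"
#include "access/gist_private.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"

static void
set_downlink_blkno(SplitedPageLayout *ptr)
{
	ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
	ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
}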