This article collects and summarizes typical usage examples of the C++ ReadBufferExtended function. If you have been wondering what exactly ReadBufferExtended does in C++ and how to use it, the curated code examples here may help.
Below, 15 code examples of the ReadBufferExtended function are shown, sorted by popularity by default.
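Before the examples, here is a minimal sketch of the calling pattern they all share: ReadBufferExtended returns a pinned buffer, and the caller is responsible for locking the buffer and for releasing the pin. The helper name read_one_block and its body are illustrative only, not part of PostgreSQL.
#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

/* Illustrative helper (not a PostgreSQL function): read one block of the
 * main fork, inspect it under a share lock, then unlock and unpin. */
static void
read_one_block(Relation rel, BlockNumber blkno, BufferAccessStrategy strategy)
{
    Buffer      buf;
    Page        page;

    /* Pin the block; RBM_NORMAL reads it from disk if not already cached */
    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy);
    LockBuffer(buf, BUFFER_LOCK_SHARE);
    page = BufferGetPage(buf);

    /* ... examine the page contents here ... */
    (void) page;

    UnlockReleaseBuffer(buf);   /* drop the lock and the pin together */
}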
Example 1: gistbulkdelete
/*
* Bulk deletion of all index entries pointing to a set of heap tuples;
* also checks for invalid tuples left behind by crash recovery.
* The set of target tuples is specified via a callback routine that tells
* whether any given heap tuple (identified by ItemPointer) is being deleted.
*
* Result: a palloc'd struct containing statistical info for VACUUM displays.
*/
Datum
gistbulkdelete(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation rel = info->index;
GistBDItem *stack,
*ptr;
/* first time through? */
if (stats == NULL)
stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
/* we'll re-count the tuples each time */
stats->std.estimated_count = false;
stats->std.num_index_tuples = 0;
stack = (GistBDItem *) palloc0(sizeof(GistBDItem));
stack->blkno = GIST_ROOT_BLKNO;
while (stack)
{
Buffer buffer;
Page page;
OffsetNumber i,
maxoff;
IndexTuple idxtuple;
ItemId iid;
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, stack->blkno,
RBM_NORMAL, info->strategy);
LockBuffer(buffer, GIST_SHARE);
gistcheckpage(rel, buffer);
page = (Page) BufferGetPage(buffer);
if (GistPageIsLeaf(page))
{
OffsetNumber todelete[MaxOffsetNumber];
int ntodelete = 0;
LockBuffer(buffer, GIST_UNLOCK);
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
if (stack->blkno == GIST_ROOT_BLKNO && !GistPageIsLeaf(page))
{
/* only the root can become non-leaf during relock */
UnlockReleaseBuffer(buffer);
/* one more check */
continue;
}
/*
* The page could have been split after we examined the parent, so
* re-check for a split now that we have relocked it
*/
pushStackIfSplited(page, stack);
/*
* Remove deletable tuples from page
*/
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
iid = PageGetItemId(page, i);
idxtuple = (IndexTuple) PageGetItem(page, iid);
if (callback(&(idxtuple->t_tid), callback_state))
{
todelete[ntodelete] = i - ntodelete;
ntodelete++;
stats->std.tuples_removed += 1;
}
else
stats->std.num_index_tuples += 1;
}
if (ntodelete)
{
START_CRIT_SECTION();
MarkBufferDirty(buffer);
for (i = 0; i < ntodelete; i++)
PageIndexTupleDelete(page, todelete[i]);
GistMarkTuplesDeleted(page);
if (!rel->rd_istemp)
{
//......... part of this code omitted .........
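Example 1's leaf branch shows a common locking idiom: there is no atomic lock upgrade, so the share lock is dropped and an exclusive lock is taken, and because the page was momentarily unlocked its state must be re-checked afterwards (here: whether the root is still a leaf, and whether the page has been split). A distilled sketch, with upgrade_to_exclusive as a hypothetical helper name:
/*
 * Illustrative helper: trade a share lock for an exclusive one. Nothing is
 * held between the two LockBuffer calls, so the caller must re-validate
 * the page contents after this returns.
 */
static Page
upgrade_to_exclusive(Buffer buffer)
{
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);     /* no lock held here... */
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);  /* ...page may have changed */
    return BufferGetPage(buffer);               /* caller rechecks state */
}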
Example 2: ginVacuumPostingTreeLeaves
/*
* Scan through the posting tree, deleting empty tuples from leaf pages.
* This function also collects empty subtrees (those whose leaves are all
* empty). A cleanup lock is taken on the parents of such subtrees, and
* then ginScanToDelete is called. This is done for every inner page that
* points to an empty subtree.
*/
static bool
ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
{
Buffer buffer;
Page page;
bool hasVoidPage = FALSE;
MemoryContext oldCxt;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
RBM_NORMAL, gvs->strategy);
page = BufferGetPage(buffer);
ginTraverseLock(buffer, false);
Assert(GinPageIsData(page));
if (GinPageIsLeaf(page))
{
oldCxt = MemoryContextSwitchTo(gvs->tmpCxt);
ginVacuumPostingTreeLeaf(gvs->index, buffer, gvs);
MemoryContextSwitchTo(oldCxt);
MemoryContextReset(gvs->tmpCxt);
/* if the root is a leaf page, no further processing is needed */
if (GinDataLeafPageIsEmpty(page))
hasVoidPage = TRUE;
UnlockReleaseBuffer(buffer);
return hasVoidPage;
}
else
{
OffsetNumber i;
bool hasEmptyChild = FALSE;
bool hasNonEmptyChild = FALSE;
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
/*
* Read all child block numbers. It is not clear this is safe if there
* are many concurrent vacuums.
*/
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
PostingItem *pitem = GinDataPageGetPostingItem(page, i);
children[i] = PostingItemGetBlockNumber(pitem);
}
UnlockReleaseBuffer(buffer);
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
if (ginVacuumPostingTreeLeaves(gvs, children[i], FALSE))
hasEmptyChild = TRUE;
else
hasNonEmptyChild = TRUE;
}
pfree(children);
vacuum_delay_point();
/*
* The whole subtree is empty, so just return TRUE to tell the parent it
* must do the cleanup, unless we are the root and there is nowhere
* higher to go.
*/
if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
return TRUE;
if (hasEmptyChild)
{
DataPageDeleteStack root,
*ptr,
*tmp;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
RBM_NORMAL, gvs->strategy);
LockBufferForCleanup(buffer);
memset(&root, 0, sizeof(DataPageDeleteStack));
root.leftBlkno = InvalidBlockNumber;
root.isRoot = TRUE;
ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
ptr = root.child;
while (ptr)
{
//......... part of this code omitted .........
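A pattern worth noting in example 2: the child block numbers are copied out of the page while it is locked, the buffer is released, and only then does the recursion descend, so no buffer lock is held across recursive calls. A condensed sketch of that idiom, assuming the GIN page macros used above (GIN_SHARE, GinPageGetOpaque, GinDataPageGetPostingItem) and an illustrative function name:
/*
 * Condensed sketch (not PostgreSQL source) of the collect-then-recurse
 * idiom from example 2.
 */
static void
visit_posting_subtree(Relation index, BlockNumber blkno,
                      BufferAccessStrategy strategy)
{
    Buffer      buffer;
    Page        page;
    OffsetNumber i,
                maxoff;
    BlockNumber *children;

    buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
                                RBM_NORMAL, strategy);
    LockBuffer(buffer, GIN_SHARE);
    page = BufferGetPage(buffer);

    if (GinPageIsLeaf(page))
    {
        /* leaf page: process its tuples in place (omitted) */
        UnlockReleaseBuffer(buffer);
        return;
    }

    /* copy the child pointers while the page is locked... */
    maxoff = GinPageGetOpaque(page)->maxoff;
    children = palloc(sizeof(BlockNumber) * (maxoff + 1));
    for (i = FirstOffsetNumber; i <= maxoff; i++)
        children[i] = PostingItemGetBlockNumber(GinDataPageGetPostingItem(page, i));
    UnlockReleaseBuffer(buffer);

    /* ...then descend with no buffer lock held */
    for (i = FirstOffsetNumber; i <= maxoff; i++)
        visit_posting_subtree(index, children[i], strategy);

    pfree(children);
}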
Example 3: ginvacuumcleanup
IndexBulkDeleteResult *
ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
Relation index = info->index;
bool needLock;
BlockNumber npages,
blkno;
BlockNumber totFreePages;
GinState ginstate;
GinStatsData idxStat;
/*
* In an autovacuum analyze, we want to clean up pending insertions.
* Otherwise, an ANALYZE-only call is a no-op.
*/
if (info->analyze_only)
{
if (IsAutoVacuumWorkerProcess())
{
initGinState(&ginstate, index);
ginInsertCleanup(&ginstate, false, true, stats);
}
return stats;
}
/*
* Set up all-zero stats and cleanup pending inserts if ginbulkdelete
* wasn't called
*/
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
initGinState(&ginstate, index);
ginInsertCleanup(&ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}
memset(&idxStat, 0, sizeof(idxStat));
/*
* XXX we always report the heap tuple count as the number of index
* entries. This is bogus if the index is partial, but it's real hard to
* tell how many distinct heap entries are referenced by a GIN index.
*/
stats->num_index_tuples = info->num_heap_tuples;
stats->estimated_count = info->estimated_count;
/*
* Need lock unless it's local to this backend.
*/
needLock = !RELATION_IS_LOCAL(index);
if (needLock)
LockRelationForExtension(index, ExclusiveLock);
npages = RelationGetNumberOfBlocks(index);
if (needLock)
UnlockRelationForExtension(index, ExclusiveLock);
totFreePages = 0;
for (blkno = GIN_ROOT_BLKNO; blkno < npages; blkno++)
{
Buffer buffer;
Page page;
vacuum_delay_point();
buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
RBM_NORMAL, info->strategy);
LockBuffer(buffer, GIN_SHARE);
page = (Page) BufferGetPage(buffer);
if (PageIsNew(page) || GinPageIsDeleted(page))
{
Assert(blkno != GIN_ROOT_BLKNO);
RecordFreeIndexPage(index, blkno);
totFreePages++;
}
else if (GinPageIsData(page))
{
idxStat.nDataPages++;
}
else if (!GinPageIsList(page))
{
idxStat.nEntryPages++;
if (GinPageIsLeaf(page))
idxStat.nEntries += PageGetMaxOffsetNumber(page);
}
UnlockReleaseBuffer(buffer);
}
/* Update the metapage with accurate page and entry counts */
idxStat.nTotalPages = npages;
ginUpdateStats(info->index, &idxStat);
/* Finally, vacuum the FSM */
IndexFreeSpaceMapVacuum(info->index);
//......... part of this code omitted .........
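In the vacuum examples, info->strategy is supplied by the VACUUM machinery; code that needs its own ring-buffer strategy creates one with GetAccessStrategy, as example 11 below does with BAS_BULKREAD. A sketch of that pattern with a hypothetical helper, scan_fork:
#include "postgres.h"
#include "commands/vacuum.h"
#include "storage/bufmgr.h"

/* Hypothetical helper: sweep every block of a relation through a small
 * ring of buffers so the scan doesn't evict the rest of the cache. */
static void
scan_fork(Relation rel, BlockNumber nblocks)
{
    BufferAccessStrategy strategy = GetAccessStrategy(BAS_VACUUM);
    BlockNumber blkno;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        Buffer      buf;

        vacuum_delay_point();   /* yield to cost-based vacuum delay */
        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                 RBM_NORMAL, strategy);
        /* ... lock and examine the page here ... */
        ReleaseBuffer(buf);
    }
    FreeAccessStrategy(strategy);
}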
Example 4: btvacuumpage
/*
* btvacuumpage --- VACUUM one page
*
* This processes a single page for btvacuumscan(). In some cases we
* must go back and re-examine previously-scanned pages; this routine
* recurses when necessary to handle that case.
*
* blkno is the page to process. orig_blkno is the highest block number
* reached by the outer btvacuumscan loop (the same as blkno, unless we
* are recursing to re-examine a previous page).
*/
static void
btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
{
IndexVacuumInfo *info = vstate->info;
IndexBulkDeleteResult *stats = vstate->stats;
IndexBulkDeleteCallback callback = vstate->callback;
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool delete_now;
BlockNumber recurse_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
restart:
delete_now = false;
recurse_to = P_NONE;
/* call vacuum_delay_point while not holding any buffer lock */
vacuum_delay_point();
/*
* We can't use _bt_getbuf() here because it always applies
* _bt_checkpage(), which will barf on an all-zero page. We want to
* recycle all-zero pages, not fail. Also, we want to use a nondefault
* buffer access strategy.
*/
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
info->strategy);
LockBuffer(buf, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!PageIsNew(page))
_bt_checkpage(rel, buf);
/*
* If we are recursing, the only case we want to do anything with is a
* live leaf page having the current vacuum cycle ID. Any other state
* implies we already saw the page (eg, deleted it as being empty).
*/
if (blkno != orig_blkno)
{
if (_bt_page_recyclable(page) ||
P_IGNORE(opaque) ||
!P_ISLEAF(opaque) ||
opaque->btpo_cycleid != vstate->cycleid)
{
_bt_relbuf(rel, buf);
return;
}
}
/* Page is valid, see what to do with it */
if (_bt_page_recyclable(page))
{
/* Okay to recycle this page */
RecordFreeIndexPage(rel, blkno);
vstate->totFreePages++;
stats->pages_deleted++;
}
else if (P_ISDELETED(opaque))
{
/* Already deleted, but can't recycle yet */
stats->pages_deleted++;
}
else if (P_ISHALFDEAD(opaque))
{
/* Half-dead, try to delete */
delete_now = true;
}
else if (P_ISLEAF(opaque))
{
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable;
OffsetNumber offnum,
minoff,
maxoff;
/*
* Trade in the initial read lock for a super-exclusive write lock on
* this page. We must get such a lock on every leaf page over the
* course of the vacuum scan, whether or not it actually contains any
* deletable tuples --- see nbtree/README.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/*
* Remember highest leaf page number we've taken cleanup lock on; see
//......... part of this code omitted .........
Example 5: ginDeletePage
/*
* Delete a posting tree page.
*/
static void
ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer;
Buffer lBuffer;
Buffer pBuffer;
Page page,
parentPage;
BlockNumber rightlink;
/*
* This function MUST be called only while one of the parent pages holds
* an exclusive cleanup lock; that guarantees no insertions are currently
* happening in this subtree. The caller has also acquired an exclusive
* lock on the page being deleted, and earlier acquired and released an
* exclusive lock on the left page. The parent and this page are thus
* already locked; we take the left-page lock here only so we can mark
* that page dirty after changing its right pointer.
*/
lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
RBM_NORMAL, gvs->strategy);
dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
RBM_NORMAL, gvs->strategy);
pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
RBM_NORMAL, gvs->strategy);
LockBuffer(lBuffer, GIN_EXCLUSIVE);
START_CRIT_SECTION();
/* Unlink the page by changing left sibling's rightlink */
page = BufferGetPage(dBuffer);
rightlink = GinPageGetOpaque(page)->rightlink;
page = BufferGetPage(lBuffer);
GinPageGetOpaque(page)->rightlink = rightlink;
/* Delete downlink from parent */
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
do
{
PostingItem *tod = GinDataPageGetPostingItem(parentPage, myoff);
Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
} while (0);
#endif
GinPageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
/*
* we must not change the rightlink field, so that search scans already
* in flight can still follow it
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
MarkBufferDirty(pBuffer);
MarkBufferDirty(lBuffer);
MarkBufferDirty(dBuffer);
if (RelationNeedsWAL(gvs->index))
{
XLogRecPtr recptr;
ginxlogDeletePage data;
/*
* We can't pass REGBUF_STANDARD for the deleted page, because we
* didn't set pd_lower on pre-9.4 versions. The page might've been
* binary-upgraded from an older version, and hence not have pd_lower
* set correctly. Ditto for the left page, but removing the item from
* the parent updated its pd_lower, so we know that's OK at this
* point.
*/
XLogBeginInsert();
XLogRegisterBuffer(0, dBuffer, 0);
XLogRegisterBuffer(1, pBuffer, REGBUF_STANDARD);
XLogRegisterBuffer(2, lBuffer, 0);
data.parentOffset = myoff;
data.rightLink = GinPageGetOpaque(page)->rightlink;
XLogRegisterData((char *) &data, sizeof(ginxlogDeletePage));
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE);
PageSetLSN(page, recptr);
PageSetLSN(parentPage, recptr);
PageSetLSN(BufferGetPage(lBuffer), recptr);
}
ReleaseBuffer(pBuffer);
UnlockReleaseBuffer(lBuffer);
ReleaseBuffer(dBuffer);
END_CRIT_SECTION();
//......... part of this code omitted .........
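The WAL-logging block in this example follows the generic pattern of the 9.5+ XLog API: apply the change and mark the buffer dirty inside a critical section, then register buffers and data and insert the record. Stripped of the GIN specifics, the skeleton looks roughly like the sketch below; log_page_change, payload, rmgr_id and info are stand-ins, not real PostgreSQL symbols:
/*
 * Illustrative skeleton (not PostgreSQL source): WAL-log one dirtied page.
 * payload, rmgr_id and info stand in for the access method's own record
 * data and resource-manager constants.
 */
static void
log_page_change(Relation rel, Buffer buf,
                void *payload, int payload_len,
                RmgrId rmgr_id, uint8 info)
{
    START_CRIT_SECTION();
    MarkBufferDirty(buf);               /* the change was already applied */
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
        XLogRegisterData((char *) payload, payload_len);
        recptr = XLogInsert(rmgr_id, info);
        PageSetLSN(BufferGetPage(buf), recptr);
    }
    END_CRIT_SECTION();
}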
Example 6: lazy_scan_heap
//......... part of this code omitted .........
vmbuffer = InvalidBuffer;
}
/* Log cleanup info before we touch indexes */
vacuum_log_cleanup_info(onerel, vacrelstats);
/* Remove index entries */
for (i = 0; i < nindexes; i++)
lazy_vacuum_index(Irel[i],
&indstats[i],
vacrelstats);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
* not to reset latestRemovedXid since we want that value to be
* valid.
*/
vacrelstats->num_dead_tuples = 0;
vacrelstats->num_index_scans++;
}
/*
* Pin the visibility map page in case we need to mark the page
* all-visible. In most cases this will be very cheap, because we'll
* already have the correct page pinned anyway. However, it's
* possible that (a) next_not_all_visible_block is covered by a
* different VM page than the current block or (b) we released our pin
* and did a cycle of index vacuuming.
*/
visibilitymap_pin(onerel, blkno, &vmbuffer);
buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
RBM_NORMAL, vac_strategy);
/* We need buffer cleanup lock so that we can prune HOT chains. */
if (!ConditionalLockBufferForCleanup(buf))
{
/*
* If we're not scanning the whole relation to guard against XID
* wraparound, it's OK to skip vacuuming a page. The next vacuum
* will clean it up.
*/
if (!scan_all)
{
ReleaseBuffer(buf);
continue;
}
/*
* If this is a wraparound checking vacuum, then we read the page
* with share lock to see if any xids need to be frozen. If the
* page doesn't need attention we just skip and continue. If it
* does, we wait for cleanup lock.
*
* We could defer the lock request further by remembering the page
* and coming back to it later, or we could even register
* ourselves for multiple buffers and then service whichever one
* is received first. For now, this seems good enough.
*/
LockBuffer(buf, BUFFER_LOCK_SHARE);
if (!lazy_check_needs_freeze(buf))
{
UnlockReleaseBuffer(buf);
continue;
//......... part of this code omitted .........
Example 7: gistvacuumcleanup
/*
* VACUUM cleanup: update FSM
*/
IndexBulkDeleteResult *
gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
Relation rel = info->index;
BlockNumber npages,
blkno;
BlockNumber totFreePages;
bool needLock;
/* No-op in ANALYZE ONLY mode */
if (info->analyze_only)
return stats;
/* Set up all-zero stats if gistbulkdelete wasn't called */
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
/* use heap's tuple count */
stats->num_index_tuples = info->num_heap_tuples;
stats->estimated_count = info->estimated_count;
/*
* XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
/*
* Need lock unless it's local to this backend.
*/
needLock = !RELATION_IS_LOCAL(rel);
/* try to find deleted pages */
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
npages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
totFreePages = 0;
for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++)
{
Buffer buffer;
Page page;
vacuum_delay_point();
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
info->strategy);
LockBuffer(buffer, GIST_SHARE);
page = (Page) BufferGetPage(buffer);
if (PageIsNew(page) || GistPageIsDeleted(page))
{
totFreePages++;
RecordFreeIndexPage(rel, blkno);
}
UnlockReleaseBuffer(buffer);
}
/* Finally, vacuum the FSM */
IndexFreeSpaceMapVacuum(info->index);
/* return statistics */
stats->pages_free = totFreePages;
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
stats->num_pages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
return stats;
}
Example 8: hashbulkdelete
/*
* Bulk deletion of all index entries pointing to a set of heap tuples.
* The set of target tuples is specified via a callback routine that tells
* whether any given heap tuple (identified by ItemPointer) is being deleted.
*
* This function also deletes tuples that were moved to another bucket
* by a split.
*
* Result: a palloc'd struct containing statistical info for VACUUM displays.
*/
IndexBulkDeleteResult *
hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state)
{
Relation rel = info->index;
double tuples_removed;
double num_index_tuples;
double orig_ntuples;
Bucket orig_maxbucket;
Bucket cur_maxbucket;
Bucket cur_bucket;
Buffer metabuf = InvalidBuffer;
HashMetaPage metap;
HashMetaPage cachedmetap;
tuples_removed = 0;
num_index_tuples = 0;
/*
* We need a copy of the metapage so that we can use its hashm_spares[]
* values to compute bucket page addresses, but a cached copy should be
* good enough. (If not, we'll detect that further down and refresh the
* cache as necessary.)
*/
cachedmetap = _hash_getcachedmetap(rel, &metabuf, false);
Assert(cachedmetap != NULL);
orig_maxbucket = cachedmetap->hashm_maxbucket;
orig_ntuples = cachedmetap->hashm_ntuples;
/* Scan the buckets that we know exist */
cur_bucket = 0;
cur_maxbucket = orig_maxbucket;
loop_top:
while (cur_bucket <= cur_maxbucket)
{
BlockNumber bucket_blkno;
BlockNumber blkno;
Buffer bucket_buf;
Buffer buf;
HashPageOpaque bucket_opaque;
Page page;
bool split_cleanup = false;
/* Get address of bucket's start page */
bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket);
blkno = bucket_blkno;
/*
* We need to acquire a cleanup lock on the primary bucket page to wait
* out concurrent scans before deleting the dead tuples.
*/
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy);
LockBufferForCleanup(buf);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
page = BufferGetPage(buf);
bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
/*
* If the bucket contains tuples that were moved here by a split, we
* need to delete them. We can't do so while the split of the bucket
* is still in progress, since scans still need those tuples.
*/
if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
{
split_cleanup = true;
/*
* This bucket might have been split since we last held a lock on
* the metapage. If so, hashm_maxbucket, hashm_highmask and
* hashm_lowmask might be old enough to cause us to fail to remove
* tuples left behind by the most recent split. To prevent that,
* now that the primary page of the target bucket has been locked
* (and thus can't be further split), check whether we need to
* update our cached metapage data.
*
* NB: The check for InvalidBlockNumber is only needed for
* on-disk compatibility with indexes created before we started
* storing hashm_maxbucket in the primary page's hasho_prevblkno.
*/
if (bucket_opaque->hasho_prevblkno != InvalidBlockNumber &&
bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
{
cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
Assert(cachedmetap != NULL);
}
//......... part of this code omitted .........
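Note the use of LockBufferForCleanup above: unlike a plain LockBuffer, it blocks until this backend holds the only pin on the page, so no concurrent scan can be left stopped partway through it. A minimal sketch, with read_with_cleanup_lock as an illustrative name:
/* Illustrative helper: pin a block and take a cleanup lock, i.e. an
 * exclusive lock plus the guarantee that ours is the only pin. */
static Buffer
read_with_cleanup_lock(Relation rel, BlockNumber blkno,
                       BufferAccessStrategy strategy)
{
    Buffer      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                         RBM_NORMAL, strategy);

    LockBufferForCleanup(buf);  /* may sleep until other pins are dropped */
    return buf;
}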
Example 9: fsm_readbuf
/*
* Read a FSM page.
*
* If the page doesn't exist, InvalidBuffer is returned, or if 'extend' is
* true, the FSM file is extended.
*/
static Buffer
fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
{
BlockNumber blkno = fsm_logical_to_physical(addr);
Buffer buf;
RelationOpenSmgr(rel);
/*
* If we haven't cached the size of the FSM yet, check it first. Also
* recheck if the requested block seems to be past end, since our cached
* value might be stale. (We send smgr inval messages on truncation, but
* not on extension.)
*/
if (rel->rd_smgr->smgr_fsm_nblocks == InvalidBlockNumber ||
blkno >= rel->rd_smgr->smgr_fsm_nblocks)
{
if (smgrexists(rel->rd_smgr, FSM_FORKNUM))
rel->rd_smgr->smgr_fsm_nblocks = smgrnblocks(rel->rd_smgr,
FSM_FORKNUM);
else
rel->rd_smgr->smgr_fsm_nblocks = 0;
}
/* Handle requests beyond EOF */
if (blkno >= rel->rd_smgr->smgr_fsm_nblocks)
{
if (extend)
fsm_extend(rel, blkno + 1);
else
return InvalidBuffer;
}
/*
* Use ZERO_ON_ERROR mode, and initialize the page if necessary. The FSM
* information is not accurate anyway, so it's better to clear corrupt
* pages than error out. Since the FSM changes are not WAL-logged, the
* so-called torn page problem on crash can lead to pages with corrupt
* headers, for example.
*
* The initialize-the-page part is trickier than it looks, because of the
* possibility of multiple backends doing this concurrently, and our
* desire to not uselessly take the buffer lock in the normal path where
* the page is OK. We must take the lock to initialize the page, so
* recheck page newness after we have the lock, in case someone else
* already did it. Also, because we initially check PageIsNew with no
* lock, it's possible to fall through and return the buffer while someone
* else is still initializing the page (i.e., we might see pd_upper as set
* but other page header fields are still zeroes). This is harmless for
* callers that will take a buffer lock themselves, but some callers
* inspect the page without any lock at all. The latter is OK only so
* long as it doesn't depend on the page header having correct contents.
* Current usage is safe because PageGetContents() does not require that.
*/
buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
if (PageIsNew(BufferGetPage(buf)))
{
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
if (PageIsNew(BufferGetPage(buf)))
PageInit(BufferGetPage(buf), BLCKSZ, 0);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
return buf;
}
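This is the only example that passes a mode other than RBM_NORMAL: RBM_ZERO_ON_ERROR substitutes a zeroed page instead of failing when the stored page is corrupt. In recent PostgreSQL versions (9.4 and later) bufmgr.h also offers RBM_ZERO_AND_LOCK, which skips the read entirely and returns a zero-filled page that is already exclusively locked; a sketch with a hypothetical helper name:
/* Hypothetical helper: claim a page whose old contents don't matter.
 * RBM_ZERO_AND_LOCK returns it zero-filled and exclusively locked. */
static Buffer
grab_zeroed_page(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                                         RBM_ZERO_AND_LOCK, NULL);

    PageInit(BufferGetPage(buf), BLCKSZ, 0);    /* caller initializes it */
    return buf;                 /* returned pinned and locked */
}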
Example 10: ginDeletePage
/*
* Delete a posting tree page.
*/
static void
ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer;
Buffer lBuffer;
Buffer pBuffer;
Page page,
parentPage;
BlockNumber rightlink;
/*
* Lock the pages in the same order as an insertion would, to avoid
* deadlocks: left, then right, then parent.
*/
lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
RBM_NORMAL, gvs->strategy);
dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
RBM_NORMAL, gvs->strategy);
pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
RBM_NORMAL, gvs->strategy);
LockBuffer(lBuffer, GIN_EXCLUSIVE);
LockBuffer(dBuffer, GIN_EXCLUSIVE);
if (!isParentRoot) /* parent is already locked by
* LockBufferForCleanup() */
LockBuffer(pBuffer, GIN_EXCLUSIVE);
START_CRIT_SECTION();
/* Unlink the page by changing left sibling's rightlink */
page = BufferGetPage(dBuffer);
rightlink = GinPageGetOpaque(page)->rightlink;
page = BufferGetPage(lBuffer);
GinPageGetOpaque(page)->rightlink = rightlink;
/* Delete downlink from parent */
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
do
{
PostingItem *tod = GinDataPageGetPostingItem(parentPage, myoff);
Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
} while (0);
#endif
GinPageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
/*
* we must not change the rightlink field, so that search scans already
* in flight can still follow it
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
MarkBufferDirty(pBuffer);
MarkBufferDirty(lBuffer);
MarkBufferDirty(dBuffer);
if (RelationNeedsWAL(gvs->index))
{
XLogRecPtr recptr;
ginxlogDeletePage data;
/*
* We can't pass REGBUF_STANDARD for the deleted page, because we
* didn't set pd_lower on pre-9.4 versions. The page might've been
* binary-upgraded from an older version, and hence not have pd_lower
* set correctly. Ditto for the left page, but removing the item from
* the parent updated its pd_lower, so we know that's OK at this
* point.
*/
XLogBeginInsert();
XLogRegisterBuffer(0, dBuffer, 0);
XLogRegisterBuffer(1, pBuffer, REGBUF_STANDARD);
XLogRegisterBuffer(2, lBuffer, 0);
data.parentOffset = myoff;
data.rightLink = GinPageGetOpaque(page)->rightlink;
XLogRegisterData((char *) &data, sizeof(ginxlogDeletePage));
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE);
PageSetLSN(page, recptr);
PageSetLSN(parentPage, recptr);
PageSetLSN(BufferGetPage(lBuffer), recptr);
}
if (!isParentRoot)
LockBuffer(pBuffer, GIN_UNLOCK);
ReleaseBuffer(pBuffer);
UnlockReleaseBuffer(lBuffer);
UnlockReleaseBuffer(dBuffer);
END_CRIT_SECTION();
//......... part of this code omitted .........
Example 11: blgetbitmap
/*
* Insert all matching tuples into a bitmap.
*/
int64
blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
{
int64 ntids = 0;
BlockNumber blkno = BLOOM_HEAD_BLKNO,
npages;
int i;
BufferAccessStrategy bas;
BloomScanOpaque so = (BloomScanOpaque) scan->opaque;
if (so->sign == NULL)
{
/* New search: have to calculate search signature */
ScanKey skey = scan->keyData;
so->sign = palloc0(sizeof(BloomSignatureWord) * so->state.opts.bloomLength);
for (i = 0; i < scan->numberOfKeys; i++)
{
/*
* Assume bloom-indexable operators are strict, so nothing can be
* found for a NULL key.
*/
if (skey->sk_flags & SK_ISNULL)
{
pfree(so->sign);
so->sign = NULL;
return 0;
}
/* Add next value to the signature */
signValue(&so->state, so->sign, skey->sk_argument,
skey->sk_attno - 1);
skey++;
}
}
/*
* We're going to read the whole index, so use a bulk-read buffer
* access strategy.
*/
bas = GetAccessStrategy(BAS_BULKREAD);
npages = RelationGetNumberOfBlocks(scan->indexRelation);
for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
{
Buffer buffer;
Page page;
buffer = ReadBufferExtended(scan->indexRelation, MAIN_FORKNUM,
blkno, RBM_NORMAL, bas);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
if (!PageIsNew(page) && !BloomPageIsDeleted(page))
{
OffsetNumber offset,
maxOffset = BloomPageGetMaxOffset(page);
for (offset = 1; offset <= maxOffset; offset++)
{
BloomTuple *itup = BloomPageGetTuple(&so->state, page, offset);
bool res = true;
/* Check index signature with scan signature */
for (i = 0; i < so->state.opts.bloomLength; i++)
{
if ((itup->sign[i] & so->sign[i]) != so->sign[i])
{
res = false;
break;
}
}
/* Add matching tuples to bitmap */
if (res)
{
tbm_add_tuples(tbm, &itup->heapPtr, 1, true);
ntids++;
}
}
}
UnlockReleaseBuffer(buffer);
CHECK_FOR_INTERRUPTS();
}
FreeAccessStrategy(bas);
return ntids;
}
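The inner loop's test, (itup->sign[i] & so->sign[i]) != so->sign[i], is the standard Bloom-filter containment check: every bit of the probe signature must also be set in the stored signature, word by word. A tiny self-contained illustration:
#include <stdbool.h>
#include <stdint.h>

/* One-word Bloom containment check: every probe bit must be present. */
static bool
bloom_may_contain(uint32_t stored, uint32_t probe)
{
    return (stored & probe) == probe;
}

/* bloom_may_contain(0x0B, 0x03) is true  (possible match);
 * bloom_may_contain(0x0B, 0x04) is false (definitely absent). */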
Example 12: ginVacuumPostingTreeLeaves
static bool
ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer)
{
Buffer buffer;
Page page;
bool hasVoidPage = FALSE;
MemoryContext oldCxt;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
RBM_NORMAL, gvs->strategy);
page = BufferGetPage(buffer);
/*
* We must be sure we don't run concurrently with inserts: the insert
* process never releases the root page until it finishes (though it may
* unlock and relock it). That way no new scan can start, while scans
* already in progress continue to work concurrently.
*/
if (isRoot)
LockBufferForCleanup(buffer);
else
LockBuffer(buffer, GIN_EXCLUSIVE);
Assert(GinPageIsData(page));
if (GinPageIsLeaf(page))
{
oldCxt = MemoryContextSwitchTo(gvs->tmpCxt);
ginVacuumPostingTreeLeaf(gvs->index, buffer, gvs);
MemoryContextSwitchTo(oldCxt);
MemoryContextReset(gvs->tmpCxt);
/* if the root is a leaf page, no further processing is needed */
if (!isRoot && !hasVoidPage && GinDataLeafPageIsEmpty(page))
hasVoidPage = TRUE;
}
else
{
OffsetNumber i;
bool isChildHasVoid = FALSE;
for (i = FirstOffsetNumber; i <= GinPageGetOpaque(page)->maxoff; i++)
{
PostingItem *pitem = GinDataPageGetPostingItem(page, i);
if (ginVacuumPostingTreeLeaves(gvs, PostingItemGetBlockNumber(pitem), FALSE, NULL))
isChildHasVoid = TRUE;
}
if (isChildHasVoid)
hasVoidPage = TRUE;
}
/*
* If this is the root and there are empty pages in the tree, keep the
* lock for further processing; this guarantees the tree stays unused.
*/
if (!(isRoot && hasVoidPage))
{
UnlockReleaseBuffer(buffer);
}
else
{
Assert(rootBuffer);
*rootBuffer = buffer;
}
return hasVoidPage;
}
Example 13: get_raw_page_internal
/*
* workhorse
*/
static bytea *
get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
{
bytea *raw_page;
RangeVar *relrv;
Relation rel;
char *raw_page_data;
Buffer buf;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to use raw functions"))));
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
/* Check that this relation has storage */
if (rel->rd_rel->relkind == RELKIND_VIEW)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from view \"%s\"",
RelationGetRelationName(rel))));
if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from composite type \"%s\"",
RelationGetRelationName(rel))));
/*
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
* session's local buffers.
*/
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
if (blkno >= RelationGetNumberOfBlocks(rel))
elog(ERROR, "block number %u is out of range for relation \"%s\"",
blkno, RelationGetRelationName(rel));
/* Initialize buffer to copy to */
raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ);
SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ);
raw_page_data = VARDATA(raw_page);
/* Take a verbatim copy of the page */
buf = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL);
LockBuffer(buf, BUFFER_LOCK_SHARE);
memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buf);
relation_close(rel, AccessShareLock);
return raw_page;
}
Example 14: gistvacuumcleanup
/*
* VACUUM cleanup: update FSM
*/
Datum
gistvacuumcleanup(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation rel = info->index;
BlockNumber npages,
blkno;
BlockNumber totFreePages;
BlockNumber lastBlock = GIST_ROOT_BLKNO,
lastFilledBlock = GIST_ROOT_BLKNO;
bool needLock;
/* No-op in ANALYZE ONLY mode */
if (info->analyze_only)
PG_RETURN_POINTER(stats);
/* Set up all-zero stats if gistbulkdelete wasn't called */
if (stats == NULL)
{
stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));
/* use heap's tuple count */
stats->std.num_index_tuples = info->num_heap_tuples;
stats->std.estimated_count = info->estimated_count;
/*
* XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
if (stats->needReindex)
ereport(NOTICE,
(errmsg("index \"%s\" needs VACUUM FULL or REINDEX to finish crash recovery",
RelationGetRelationName(rel))));
/*
* Need lock unless it's local to this backend.
*/
needLock = !RELATION_IS_LOCAL(rel);
/* try to find deleted pages */
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
npages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
totFreePages = 0;
for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++)
{
Buffer buffer;
Page page;
vacuum_delay_point();
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
info->strategy);
LockBuffer(buffer, GIST_SHARE);
page = (Page) BufferGetPage(buffer);
if (PageIsNew(page) || GistPageIsDeleted(page))
{
totFreePages++;
RecordFreeIndexPage(rel, blkno);
}
else
lastFilledBlock = blkno;
UnlockReleaseBuffer(buffer);
}
lastBlock = npages - 1;
/* Finally, vacuum the FSM */
IndexFreeSpaceMapVacuum(info->index);
/* return statistics */
stats->std.pages_free = totFreePages;
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
stats->std.num_pages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
PG_RETURN_POINTER(stats);
}
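Examples 7 and 14 are the same operation seen through two API generations: example 7 uses the plain C signature of the newer index-AM interface, while this version uses the older fmgr calling convention (PG_FUNCTION_ARGS, PG_GETARG_POINTER, PG_RETURN_POINTER). Under that convention the core invoked the AM's routine through the function manager, roughly like this sketch (not the exact core code; procedure stands for the looked-up FmgrInfo):
/* Sketch of how pre-9.6 index-AM code in the core invoked an AM's
 * vacuumcleanup routine through fmgr; "procedure" stands for the
 * looked-up FmgrInfo for the AM's function. */
IndexBulkDeleteResult *result;

result = (IndexBulkDeleteResult *)
    DatumGetPointer(FunctionCall2(procedure,
                                  PointerGetDatum(info),
                                  PointerGetDatum(stats)));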
Example 15: pg_prewarm
//......... part of this code omitted .........
rel = relation_open(relOid, AccessShareLock);
aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_SELECT);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_CLASS, get_rel_name(relOid));
/* Check that the fork exists. */
RelationOpenSmgr(rel);
if (!smgrexists(rel->rd_smgr, forkNumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("fork \"%s\" does not exist for this relation",
forkString)));
/* Validate block numbers, or handle nulls. */
nblocks = RelationGetNumberOfBlocksInFork(rel, forkNumber);
if (PG_ARGISNULL(3))
first_block = 0;
else
{
first_block = PG_GETARG_INT64(3);
if (first_block < 0 || first_block >= nblocks)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("starting block number must be between 0 and " INT64_FORMAT,
nblocks - 1)));
}
if (PG_ARGISNULL(4))
last_block = nblocks - 1;
else
{
last_block = PG_GETARG_INT64(4);
if (last_block < 0 || last_block >= nblocks)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ending block number must be between 0 and " INT64_FORMAT,
nblocks - 1)));
}
/* Now we're ready to do the real work. */
if (ptype == PREWARM_PREFETCH)
{
#ifdef USE_PREFETCH
/*
* In prefetch mode, we just hint the OS to read the blocks, but we
* don't know whether it really does it, and we don't wait for it to
* finish.
*
* It would probably be better to pass our prefetch requests in chunks
* of a megabyte or maybe even a whole segment at a time, but there's
* no practical way to do that at present without a gross modularity
* violation, so we just do this.
*/
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
PrefetchBuffer(rel, forkNumber, block);
++blocks_done;
}
#else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("prefetch is not supported by this build")));
#endif
}
else if (ptype == PREWARM_READ)
{
/*
* In read mode, we actually read the blocks, but not into shared
* buffers. This is more portable than prefetch mode (it works
* everywhere) and is synchronous.
*/
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
smgrread(rel->rd_smgr, forkNumber, block, blockbuffer);
++blocks_done;
}
}
else if (ptype == PREWARM_BUFFER)
{
/*
* In buffer mode, we actually pull the data into shared_buffers.
*/
for (block = first_block; block <= last_block; ++block)
{
Buffer buf;
CHECK_FOR_INTERRUPTS();
buf = ReadBufferExtended(rel, forkNumber, block, RBM_NORMAL, NULL);
ReleaseBuffer(buf);
++blocks_done;
}
}
/* Close relation, release lock. */
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(blocks_done);
}
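To summarize the three modes above: prefetch only hints the OS, read pulls blocks through smgr without caching them in shared buffers, and buffer mode uses ReadBufferExtended so pages land in shared_buffers and stay cached. A sketch combining the prefetch hint with a buffered read; warm_block is an illustrative name, and the PrefetchBuffer call uses the pre-v13 signature shown above:
/* Illustrative helper: hint the OS first (when built with USE_PREFETCH),
 * then pull the block into shared_buffers and immediately unpin it. */
static void
warm_block(Relation rel, ForkNumber forknum, BlockNumber blkno)
{
    Buffer      buf;

#ifdef USE_PREFETCH
    PrefetchBuffer(rel, forknum, blkno);    /* async hint; pre-v13 signature */
#endif
    buf = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL);
    ReleaseBuffer(buf);         /* the page remains cached in shared_buffers */
}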