This article collects typical usage examples of the RelationGetNumberOfBlocks function in C/C++. If you are wondering what exactly RelationGetNumberOfBlocks does and how to call it, the curated code examples below should help.
Fifteen code examples of RelationGetNumberOfBlocks are shown, ordered by popularity by default. All of them come from the PostgreSQL source tree, its forks, or extensions built on it.
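As a quick orientation before the examples: RelationGetNumberOfBlocks(rel) reports the current length, in blocks, of an open relation's main fork. A minimal sketch of calling it from a C extension function follows. This is illustrative only: the function name my_relation_pages and its SQL wrapper (CREATE FUNCTION ... RETURNS bigint ... LANGUAGE C STRICT, not shown) are hypothetical, and the header locations assume a recent PostgreSQL (in older branches relation_open lives in access/heapam.h rather than access/relation.h).

#include "postgres.h"

#include "access/relation.h"	/* relation_open/relation_close */
#include "fmgr.h"
#include "storage/bufmgr.h"		/* RelationGetNumberOfBlocks */
#include "utils/rel.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(my_relation_pages);

/*
 * my_relation_pages(regclass) -- hypothetical example function.
 * Returns the number of blocks in the relation's main fork.
 */
Datum
my_relation_pages(PG_FUNCTION_ARGS)
{
	Oid			relid = PG_GETARG_OID(0);
	Relation	rel;
	int64		nblocks;

	/* AccessShareLock suffices: we only read the relation's size */
	rel = relation_open(relid, AccessShareLock);
	nblocks = (int64) RelationGetNumberOfBlocks(rel);
	relation_close(rel, AccessShareLock);

	PG_RETURN_INT64(nblocks);
}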
Example 1: pgstat_index
/*
* pgstat_index -- returns live/dead tuples info in a generic index
*/
static Datum
pgstat_index(Relation rel, BlockNumber start, pgstat_page pagefn,
FunctionCallInfo fcinfo)
{
BlockNumber nblocks;
BlockNumber blkno;
BufferAccessStrategy bstrategy;
pgstattuple_type stat = {0};
/* prepare access strategy for this index */
bstrategy = GetAccessStrategy(BAS_BULKREAD);
blkno = start;
for (;;)
{
/* Get the current relation length */
LockRelationForExtension(rel, ExclusiveLock);
nblocks = RelationGetNumberOfBlocks(rel);
UnlockRelationForExtension(rel, ExclusiveLock);
/* Quit if we've scanned the whole relation */
if (blkno >= nblocks)
{
stat.table_len = (uint64) nblocks * BLCKSZ;
break;
}
for (; blkno < nblocks; blkno++)
{
CHECK_FOR_INTERRUPTS();
pagefn(&stat, rel, blkno, bstrategy);
}
}
relation_close(rel, AccessShareLock);
return build_pgstattuple_type(&stat, fcinfo);
}
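Two things are worth noting about Example 1. First, the outer for (;;) re-reads the relation length under the relation extension lock each time the inner scan catches up, so blocks appended while the scan was running are still visited. Second, the pgstat_page argument is a per-block callback; in pgstattuple.c it is declared approximately as below (a sketch from memory of that file, not a verbatim quote):

/* Per-page callback type driven by pgstat_index() above (approximate). */
typedef void (*pgstat_page) (pgstattuple_type *stat, Relation rel,
							 BlockNumber blkno,
							 BufferAccessStrategy bstrategy);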
Example 2: blbuild
/*
* Build a new bloom index.
*/
IndexBuildResult *
blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
IndexBuildResult *result;
double reltuples;
BloomBuildState buildstate;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* Initialize the meta page */
BloomInitMetapage(index);
/* Initialize the bloom build state */
memset(&buildstate, 0, sizeof(buildstate));
initBloomState(&buildstate.blstate, index);
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"Bloom build temporary context",
ALLOCSET_DEFAULT_SIZES);
initCachedPage(&buildstate);
/* Do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
bloomBuildCallback, (void *) &buildstate);
/*
* There could be some items left in the cached page. Flush this page if
* needed.
*/
if (buildstate.count > 0)
flushCachedPage(index, &buildstate);
MemoryContextDelete(buildstate.tmpCtx);
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result->heap_tuples = result->index_tuples = reltuples;
return result;
}
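The per-tuple work in blbuild happens in bloomBuildCallback, which is not shown here. For orientation, IndexBuildHeapScan drives callbacks of roughly the following shape (an approximate sketch of the IndexBuildCallback typedef from catalog/index.h of that era):

/* Approximate shape of the callback IndexBuildHeapScan() invokes per tuple. */
typedef void (*IndexBuildCallback) (Relation index,
									HeapTuple htup,
									Datum *values,
									bool *isnull,
									bool tupleIsAlive,
									void *state);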
Example 3: pg_relpagesbyid
Datum
pg_relpagesbyid(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
int64 relpages;
Relation rel;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to use pgstattuple functions"))));
rel = relation_open(relid, AccessShareLock);
/* note: this will work OK on non-local temp tables */
relpages = RelationGetNumberOfBlocks(rel);
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(relpages);
}
Example 4: hashbuild
/*
* hashbuild() -- build a new hash index.
*/
Datum
hashbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
HashBuildState buildstate;
/*
* We expect to be called exactly once for any index relation. If that's
* not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* initialize the hash index metadata page */
_hash_metapinit(index);
/* build the index */
buildstate.indtuples = 0;
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
hashbuildCallback, (void *) &buildstate);
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
PG_RETURN_POINTER(result);
}
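A version note rather than new code: this is the pre-9.6 index access method interface, in which AM entry points were SQL-callable functions taking PG_FUNCTION_ARGS. Example 9 below shows the same hashbuild from a later release, after the switch to the C-level amhandler interface and with a tuplesort-based build path added; compare _hash_metapinit here with _hash_init there.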
Example 5: pg_relpages
/* --------------------------------------------------------
* pg_relpages()
*
* Get the number of pages in the table/index.
*
* Usage: SELECT pg_relpages('t1');
* SELECT pg_relpages('t1_pkey');
* --------------------------------------------------------
*/
Datum
pg_relpages(PG_FUNCTION_ARGS)
{
text *relname = PG_GETARG_TEXT_P(0);
Relation rel;
RangeVar *relrv;
int4 relpages;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to use pgstattuple functions"))));
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
relpages = RelationGetNumberOfBlocks(rel);
relation_close(rel, AccessShareLock);
PG_RETURN_INT32(relpages);
}
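Note the contrast with pg_relpagesbyid in Example 3: this variant takes the relation name as text and returns int4, which would overflow for a relation longer than 2^31-1 blocks, whereas the OID-taking variant already returns int64. Later releases of pgstattuple accordingly changed pg_relpages to return bigint as well.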
Example 6: GetPageWithFreeSpace
/*
* GetPageWithFreeSpace - try to find a page in the given relation with
* at least the specified amount of free space.
*
* If successful, return the block number; if not, return InvalidBlockNumber.
*
* The caller must be prepared for the possibility that the returned page
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
* RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*
* For very small heap relations that don't have a FSM, we try every other
* page before extending the relation. To keep track of which pages have
* been tried, initialize a local in-memory map of pages.
*/
BlockNumber
GetPageWithFreeSpace(Relation rel, Size spaceNeeded, bool check_fsm_only)
{
uint8 min_cat = fsm_space_needed_to_cat(spaceNeeded);
BlockNumber target_block,
nblocks;
/* First try the FSM, if it exists. */
target_block = fsm_search(rel, min_cat);
if (target_block == InvalidBlockNumber &&
(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE) &&
!check_fsm_only)
{
nblocks = RelationGetNumberOfBlocks(rel);
if (nblocks > HEAP_FSM_CREATION_THRESHOLD)
{
/*
* If the FSM knows nothing of the rel, try the last page before
* we give up and extend. This avoids one-tuple-per-page syndrome
* during bootstrapping or in a recently-started system.
*/
target_block = nblocks - 1;
}
else if (nblocks > 0)
{
/* Create or update local map and get first candidate block. */
fsm_local_set(rel, nblocks);
target_block = fsm_local_search();
}
}
return target_block;
}
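The contract spelled out in the header comment (try the suggested page, re-check its free space after locking it, report the truth and retry via RecordAndGetPageWithFreeSpace, extend the relation once InvalidBlockNumber comes back) is easiest to see from the caller's side. Below is a condensed, hypothetical sketch of that loop; the real logic lives in RelationGetBufferForTuple() in hio.c and handles many more concerns (lock ordering, visibility map, bulk-insert state). It assumes the three-argument GetPageWithFreeSpace variant shown above.

/*
 * Hypothetical caller sketch of the GetPageWithFreeSpace() protocol.
 * Returns an exclusively locked buffer with enough room, or InvalidBuffer
 * if the caller must extend the relation.
 */
static Buffer
get_buffer_for_size(Relation rel, Size len)
{
	BlockNumber targetBlock = GetPageWithFreeSpace(rel, len, false);

	while (targetBlock != InvalidBlockNumber)
	{
		Buffer		buf = ReadBuffer(rel, targetBlock);
		Size		freespace;

		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		freespace = PageGetHeapFreeSpace(BufferGetPage(buf));
		if (len <= freespace)
			return buf;			/* still enough room; caller inserts here */

		/* Page filled up since the FSM answered: record truth, retry */
		UnlockReleaseBuffer(buf);
		targetBlock = RecordAndGetPageWithFreeSpace(rel, targetBlock,
													freespace, len);
	}

	return InvalidBuffer;		/* FSM and local map exhausted */
}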
Example 7: bingo_vacuumcleanup
/*
* Post-VACUUM cleanup.
*
* Result: a palloc'd struct containing statistical info for VACUUM displays.
*/
Datum bingo_vacuumcleanup(PG_FUNCTION_ARGS) {
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation rel = info->index;
BlockNumber num_pages = 0;
elog(NOTICE, "start test vacuum");
/*
* If bulkdelete wasn't called, return NULL to signify that nothing changed.
* Note: this covers the analyze_only case too
*/
if (stats == NULL) {
PG_RETURN_POINTER(NULL);
}
/*
* update statistics
*/
num_pages = RelationGetNumberOfBlocks(rel);
stats->num_pages = num_pages;
stats->num_index_tuples = 1;
stats->estimated_count = false;
PG_RETURN_POINTER(stats);
}
Example 8: gistbuild
/*
* Main entry point to GiST index build. Initially calls insert over and over,
* but switches to more efficient buffering build algorithm after a certain
* number of tuples (unless buffering mode is disabled).
*/
Datum
gistbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
GISTBuildState buildstate;
Buffer buffer;
Page page;
MemoryContext oldcxt = CurrentMemoryContext;
int fillfactor;
buildstate.indexrel = index;
if (index->rd_options)
{
/* Get buffering mode from the options string */
GiSTOptions *options = (GiSTOptions *) index->rd_options;
char *bufferingMode = (char *) options + options->bufferingModeOffset;
if (strcmp(bufferingMode, "on") == 0)
buildstate.bufferingMode = GIST_BUFFERING_STATS;
else if (strcmp(bufferingMode, "off") == 0)
buildstate.bufferingMode = GIST_BUFFERING_DISABLED;
else
buildstate.bufferingMode = GIST_BUFFERING_AUTO;
fillfactor = options->fillfactor;
}
else
{
/*
* By default, switch to buffering mode when the index grows too large
* to fit in cache.
*/
buildstate.bufferingMode = GIST_BUFFERING_AUTO;
fillfactor = GIST_DEFAULT_FILLFACTOR;
}
/* Calculate target amount of free space to leave on pages */
buildstate.freespace = BLCKSZ * (100 - fillfactor) / 100;
/*
* We expect to be called exactly once for any index relation. If that's
* not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* no locking is needed */
buildstate.giststate = initGISTstate(index);
/*
* Create a temporary memory context that is reset once for each tuple
* processed. (Note: we don't bother to make this a child of the
* giststate's scanCxt, so we have to delete it separately at the end.)
*/
buildstate.giststate->tempCxt = createTempGistContext();
/* initialize the root page */
buffer = gistNewBuffer(index);
Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
page = BufferGetPage(buffer);
START_CRIT_SECTION();
GISTInitBuffer(buffer, F_LEAF);
MarkBufferDirty(buffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata;
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX, &rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
else
PageSetLSN(page, gistGetFakeLSN(heap));
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
/* build the index */
buildstate.indtuples = 0;
buildstate.indtuplesSize = 0;
//......... (remaining code omitted) .........
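A side note on the WAL section of Example 8: the hand-built XLogRecData chain is the pre-9.5 WAL construction API, and PageSetTLI disappeared around 9.3 when that page-header field was repurposed for checksums. Under the 9.5+ API the same record would be built roughly as follows (a transliteration reusing the recptr and page variables from the function above, and assuming the XLOG_GIST_CREATE_INDEX record type of that era):

/* Sketch of the equivalent record under the PostgreSQL 9.5+ WAL API. */
XLogBeginInsert();
XLogRegisterData((char *) &(index->rd_node), sizeof(RelFileNode));
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX);
PageSetLSN(page, recptr);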
Example 9: hashbuild
/*
* hashbuild() -- build a new hash index.
*/
IndexBuildResult *
hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
IndexBuildResult *result;
BlockNumber relpages;
double reltuples;
double allvisfrac;
uint32 num_buckets;
long sort_threshold;
HashBuildState buildstate;
/*
* We expect to be called exactly once for any index relation. If that's
* not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* Estimate the number of rows currently present in the table */
estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
/* Initialize the hash index metadata page and initial buckets */
num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);
/*
* If we just insert the tuples into the index in scan order, then
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
* tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds maintenance_work_mem, or the number of
* buffers usable for the index, whichever is less. (Limiting by the
* number of buffers should reduce thrashing between PG buffers and kernel
* buffers, which seems useful even if no physical I/O results. Limiting
* by maintenance_work_mem is useful to allow easy testing of the sort
* code path, and may be useful to DBAs as an additional control knob.)
*
* NOTE: this test will need adjustment if a bucket is ever different from
* one page. Also, "initial index size" accounting does not include the
* metapage, nor the first bitmap page.
*/
sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ;
if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP)
sort_threshold = Min(sort_threshold, NBuffers);
else
sort_threshold = Min(sort_threshold, NLocBuffer);
if (num_buckets >= (uint32) sort_threshold)
buildstate.spool = _h_spoolinit(heap, index, num_buckets);
else
buildstate.spool = NULL;
/* prepare to build the index */
buildstate.indtuples = 0;
buildstate.heapRel = heap;
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
hashbuildCallback, (void *) &buildstate);
if (buildstate.spool)
{
/* sort the tuples and insert them into the index */
_h_indexbuild(buildstate.spool, buildstate.heapRel);
_h_spooldestroy(buildstate.spool);
}
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
return result;
}
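To make the sort_threshold computation concrete, here is a worked example; the numbers are illustrative assumptions, not values from the source:

/*
 * Worked example (assumptions: BLCKSZ = 8192, maintenance_work_mem set to
 * 64MB = 65536 kB, and NBuffers larger than the result):
 *
 *     sort_threshold = (65536 * 1024L) / 8192 = 8192
 *
 * So a hash index initialized with 8192 or more buckets takes the sorting
 * path (_h_spoolinit), while a smaller one inserts tuples directly.
 */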
Example 10: pgstatindex_impl
static Datum
pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo)
{
Datum result;
BlockNumber nblocks;
BlockNumber blkno;
BTIndexStat indexStat;
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
if (!IS_INDEX(rel) || !IS_BTREE(rel))
elog(ERROR, "relation \"%s\" is not a btree index",
RelationGetRelationName(rel));
/*
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
* session's local buffers.
*/
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
/*
* Read metapage
*/
{
Buffer buffer = ReadBufferExtended(rel, MAIN_FORKNUM, 0, RBM_NORMAL, bstrategy);
Page page = BufferGetPage(buffer);
BTMetaPageData *metad = BTPageGetMeta(page);
indexStat.version = metad->btm_version;
indexStat.level = metad->btm_level;
indexStat.root_blkno = metad->btm_root;
ReleaseBuffer(buffer);
}
/* -- init counters -- */
indexStat.internal_pages = 0;
indexStat.leaf_pages = 0;
indexStat.empty_pages = 0;
indexStat.deleted_pages = 0;
indexStat.max_avail = 0;
indexStat.free_space = 0;
indexStat.fragments = 0;
/*
* Scan all blocks except the metapage
*/
nblocks = RelationGetNumberOfBlocks(rel);
for (blkno = 1; blkno < nblocks; blkno++)
{
Buffer buffer;
Page page;
BTPageOpaque opaque;
CHECK_FOR_INTERRUPTS();
/* Read and lock buffer */
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Determine page type, and update totals */
if (P_ISDELETED(opaque))
indexStat.deleted_pages++;
else if (P_IGNORE(opaque))
indexStat.empty_pages++; /* this is the "half dead" state */
else if (P_ISLEAF(opaque))
{
int max_avail;
max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
indexStat.max_avail += max_avail;
indexStat.free_space += PageGetFreeSpace(page);
indexStat.leaf_pages++;
/*
* If the next leaf is on an earlier block, it indicates
* fragmentation.
*/
if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
indexStat.fragments++;
}
else
indexStat.internal_pages++;
/* Unlock and release buffer */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
//......... (remaining code omitted) .........
Example 11: get_relation_info
//......... (preceding code omitted) .........
{
info->indexkeys[i] = index->indkey.values[i];
info->opfamily[i] = indexRelation->rd_opfamily[i];
info->opcintype[i] = indexRelation->rd_opcintype[i];
}
info->relam = indexRelation->rd_rel->relam;
info->amcostestimate = indexRelation->rd_am->amcostestimate;
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
info->amsearchnulls = indexRelation->rd_am->amsearchnulls;
info->amhasgettuple = OidIsValid(indexRelation->rd_am->amgettuple);
info->amhasgetbitmap = OidIsValid(indexRelation->rd_am->amgetbitmap);
/*
* Fetch the ordering operators associated with the index, if any.
* We expect that all ordering-capable indexes use btree's
* strategy numbers for the ordering operators.
*/
if (indexRelation->rd_am->amcanorder)
{
int nstrat = indexRelation->rd_am->amstrategies;
for (i = 0; i < ncolumns; i++)
{
int16 opt = indexRelation->rd_indoption[i];
int fwdstrat;
int revstrat;
if (opt & INDOPTION_DESC)
{
fwdstrat = BTGreaterStrategyNumber;
revstrat = BTLessStrategyNumber;
}
else
{
fwdstrat = BTLessStrategyNumber;
revstrat = BTGreaterStrategyNumber;
}
/*
* Index AM must have a fixed set of strategies for it to
* make sense to specify amcanorder, so we need not allow
* the case amstrategies == 0.
*/
if (fwdstrat > 0)
{
Assert(fwdstrat <= nstrat);
info->fwdsortop[i] = indexRelation->rd_operator[i * nstrat + fwdstrat - 1];
}
if (revstrat > 0)
{
Assert(revstrat <= nstrat);
info->revsortop[i] = indexRelation->rd_operator[i * nstrat + revstrat - 1];
}
info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;
}
}
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
* correct varno for the parent relation, so that they match up
* correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
if (info->indexprs && varno != 1)
ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
if (info->indpred && varno != 1)
ChangeVarNodes((Node *) info->indpred, 1, varno, 0);
info->predOK = false; /* set later in indxpath.c */
info->unique = index->indisunique;
/*
* Estimate the index size. If it's not a partial index, we lock
* the number-of-tuples estimate to equal the parent table; if it
* is partial then we have to use the same methods as we would for
* a table, except we can be sure that the index is not larger
* than the table.
*/
if (info->indpred == NIL)
{
info->pages = RelationGetNumberOfBlocks(indexRelation);
info->tuples = rel->tuples;
}
else
{
estimate_rel_size(indexRelation, NULL,
&info->pages, &info->tuples);
if (info->tuples > rel->tuples)
info->tuples = rel->tuples;
}
index_close(indexRelation, NoLock);
indexinfos = lcons(info, indexinfos);
}
list_free(indexoidlist);
}
Example 12: lazy_truncate_heap
/*
* lazy_truncate_heap - try to truncate off any empty pages at the end
*/
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
BlockNumber old_rel_pages = vacrelstats->rel_pages;
BlockNumber new_rel_pages;
PGRUsage ru0;
pg_rusage_init(&ru0);
/*
* We need full exclusive lock on the relation in order to do truncation.
* If we can't get it, give up rather than waiting --- we don't want to
* block other backends, and we don't want to deadlock (which is quite
* possible considering we already hold a lower-grade lock).
*/
if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
return;
/*
* Now that we have exclusive lock, look to see if the rel has grown
* whilst we were vacuuming with non-exclusive lock. If so, give up; the
* newly added pages presumably contain non-deletable tuples.
*/
new_rel_pages = RelationGetNumberOfBlocks(onerel);
if (new_rel_pages != old_rel_pages)
{
/*
* Note: we intentionally don't update vacrelstats->rel_pages with the
* new rel size here. If we did, it would amount to assuming that the
* new pages are empty, which is unlikely. Leaving the numbers alone
* amounts to assuming that the new pages have the same tuple density
* as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;
}
/*
* Scan backwards from the end to verify that the end pages actually
* contain no tuples. This is *necessary*, not optional, because other
* backends could have added tuples to these pages whilst we were
* vacuuming.
*/
new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
if (new_rel_pages >= old_rel_pages)
{
/* can't do anything after all */
UnlockRelation(onerel, AccessExclusiveLock);
return;
}
/*
* Okay to truncate.
*/
RelationTruncate(onerel, new_rel_pages);
/*
* We can release the exclusive lock as soon as we have truncated. Other
* backends can't safely access the relation until they have processed the
* smgr invalidation that smgrtruncate sent out ... but that should happen
* as part of standard invalidation processing once they acquire lock on
* the relation.
*/
UnlockRelation(onerel, AccessExclusiveLock);
/*
* Update statistics. Here, it *is* correct to adjust rel_pages without
* also touching reltuples, since the tuple count wasn't changed by the
* truncation.
*/
vacrelstats->rel_pages = new_rel_pages;
vacrelstats->pages_removed = old_rel_pages - new_rel_pages;
ereport(elevel,
(errmsg("\"%s\": truncated %u to %u pages",
RelationGetRelationName(onerel),
old_rel_pages, new_rel_pages),
errdetail("%s.",
pg_rusage_show(&ru0))));
}
Example 13: btvacuumscan
/*
* btvacuumscan --- scan the index for VACUUMing purposes
*
* This combines the functions of looking for leaf tuples that are deletable
* according to the vacuum callback, looking for empty pages that can be
* deleted, and looking for old deleted pages that can be recycled. Both
* btbulkdelete and btvacuumcleanup invoke this (the latter only if no
* btbulkdelete call occurred).
*
* The caller is responsible for initially allocating/zeroing a stats struct
* and for obtaining a vacuum cycle ID if necessary.
*/
static void
btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state,
BTCycleId cycleid)
{
MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_DECLARE;
Relation rel = info->index;
BTVacState vstate;
BlockNumber num_pages;
BlockNumber blkno;
bool needLock;
MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_ENTER;
/*
* Reset counts that will be incremented during the scan; needed in case
* of multiple scans during a single VACUUM command
*/
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
/* Set up info to pass down to btvacuumpage */
vstate.info = info;
vstate.stats = stats;
vstate.callback = callback;
vstate.callback_state = callback_state;
vstate.cycleid = cycleid;
vstate.freePages = NULL; /* temporarily */
vstate.nFreePages = 0;
vstate.maxFreePages = 0;
vstate.totFreePages = 0;
/* Create a temporary memory context to run _bt_pagedel in */
vstate.pagedelcontext = AllocSetContextCreate(CurrentMemoryContext,
"_bt_pagedel",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* The outer loop iterates over all index pages except the metapage, in
* physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. Hence, we must repeatedly check the
* relation length. We must acquire the relation-extension lock while
* doing so to avoid a race condition: if someone else is extending the
* relation, there is a window where bufmgr/smgr have created a new
* all-zero page but it hasn't yet been write-locked by _bt_getbuf(). If
* we manage to scan such a page here, we'll improperly assume it can be
* recycled. Taking the lock synchronizes things enough to prevent a
* problem: either num_pages won't include the new page, or _bt_getbuf
* already has write lock on the buffer and it will be fully initialized
* before we can examine it. (See also vacuumlazy.c, which has the same
* issue.) Also, we need not worry if a page is added immediately after
* we look; the page splitting code already has write-lock on the left
* page before it adds a right page, so we must already have processed any
* tuples due to be moved into such a page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
blkno = BTREE_METAPAGE + 1;
for (;;)
{
/* Get the current relation length */
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
num_pages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
/* Allocate freePages after we read num_pages the first time */
if (vstate.freePages == NULL)
{
/* No point in remembering more than MaxFSMPages pages */
vstate.maxFreePages = MaxFSMPages;
if ((BlockNumber) vstate.maxFreePages > num_pages)
vstate.maxFreePages = (int) num_pages;
vstate.freePages = (BlockNumber *)
palloc(vstate.maxFreePages * sizeof(BlockNumber));
}
/* Quit if we've scanned the whole relation */
if (blkno >= num_pages)
//......... (remaining code omitted) .........
Example 14: btbuild
/*
* btbuild() -- build a new btree index.
*/
IndexBuildResult *
btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
IndexBuildResult *result;
double reltuples;
BTBuildState buildstate;
buildstate.isUnique = indexInfo->ii_Unique;
buildstate.haveDead = false;
buildstate.heapRel = heap;
buildstate.spool = NULL;
buildstate.spool2 = NULL;
buildstate.indtuples = 0;
#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
ResetUsage();
#endif /* BTREE_BUILD_STATS */
/*
* We expect to be called exactly once for any index relation. If that's
* not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
buildstate.spool = _bt_spoolinit(heap, index, indexInfo->ii_Unique, false);
/*
* If building a unique index, put dead tuples in a second spool to keep
* them out of the uniqueness check.
*/
if (indexInfo->ii_Unique)
buildstate.spool2 = _bt_spoolinit(heap, index, false, true);
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
btbuildCallback, (void *) &buildstate);
/* okay, all heap tuples are indexed */
if (buildstate.spool2 && !buildstate.haveDead)
{
/* spool2 turns out to be unnecessary */
_bt_spooldestroy(buildstate.spool2);
buildstate.spool2 = NULL;
}
/*
* Finish the build by (1) completing the sort of the spool file, (2)
* inserting the sorted tuples into btree pages and (3) building the upper
* levels.
*/
_bt_leafbuild(buildstate.spool, buildstate.spool2);
_bt_spooldestroy(buildstate.spool);
if (buildstate.spool2)
_bt_spooldestroy(buildstate.spool2);
#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
{
ShowUsage("BTREE BUILD STATS");
ResetUsage();
}
#endif /* BTREE_BUILD_STATS */
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
return result;
}
Example 15: _bt_validate_tid
/*
* For a newly inserted heap tid, check if an entry with this tid
* already exists in a unique index. If it does, abort the inserting
* transaction.
*/
static void
_bt_validate_tid(Relation irel, ItemPointer h_tid)
{
MIRROREDLOCK_BUFMGR_DECLARE;
BlockNumber blkno;
BlockNumber num_pages;
Buffer buf;
Page page;
BTPageOpaque opaque;
IndexTuple itup;
OffsetNumber maxoff,
minoff,
offnum;
elog(DEBUG1, "validating tid (%d,%d) for index (%s)",
ItemPointerGetBlockNumber(h_tid), ItemPointerGetOffsetNumber(h_tid),
RelationGetRelationName(irel));
blkno = BTREE_METAPAGE + 1;
num_pages = RelationGetNumberOfBlocks(irel);
MIRROREDLOCK_BUFMGR_LOCK;
for (; blkno < num_pages; blkno++)
{
buf = ReadBuffer(irel, blkno);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!PageIsNew(page))
_bt_checkpage(irel, buf);
if (P_ISLEAF(opaque))
{
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = minoff;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
itup = (IndexTuple) PageGetItem(page,
PageGetItemId(page, offnum));
if (ItemPointerEquals(&itup->t_tid, h_tid))
{
Form_pg_attribute key_att = RelationGetDescr(irel)->attrs[0];
Oid key = InvalidOid;
bool isnull;
if (key_att->atttypid == OIDOID)
{
key = DatumGetInt32(
index_getattr(itup, 1, RelationGetDescr(irel), &isnull));
elog(ERROR, "found tid (%d,%d), %s (%d) already in index (%s)",
ItemPointerGetBlockNumber(h_tid), ItemPointerGetOffsetNumber(h_tid),
NameStr(key_att->attname), key, RelationGetRelationName(irel));
}
else
{
elog(ERROR, "found tid (%d,%d) already in index (%s)",
ItemPointerGetBlockNumber(h_tid), ItemPointerGetOffsetNumber(h_tid),
RelationGetRelationName(irel));
}
}
}
}
ReleaseBuffer(buf);
}
MIRROREDLOCK_BUFMGR_UNLOCK;
}
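A closing observation on this last example: the MIRROREDLOCK_BUFMGR_* macros suggest it comes from Greenplum rather than stock PostgreSQL, and the check is deliberately brute-force. It walks every page after the metapage (num_pages as returned by RelationGetNumberOfBlocks) and compares every leaf tuple's t_tid against the new heap tid, so its cost grows linearly with index size; that makes it suitable only as a debugging or validation aid, not as a regular insertion path.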