This article collects typical usage examples of the C++ function BlockNumberIsValid. If you have been wondering what exactly BlockNumberIsValid does or how to use it, the hand-picked code examples below may help.
A total of 15 BlockNumberIsValid code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
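Before the examples, here is a hedged sketch of what BlockNumberIsValid actually checks. In PostgreSQL it is a one-line test over the BlockNumber type; the definition below is paraphrased from src/include/storage/block.h and the exact spelling should be treated as approximate.
/* Paraphrased sketch of PostgreSQL's src/include/storage/block.h (approximate). */
typedef uint32 BlockNumber;

#define InvalidBlockNumber   ((BlockNumber) 0xFFFFFFFF)

/* A block number is valid unless it equals the reserved "invalid" sentinel. */
#define BlockNumberIsValid(blockNumber) \
	((BlockNumber) (blockNumber) != InvalidBlockNumber)
All of the examples below use it the same way: hasho_nextblkno / hasho_prevblkno chain links are set to InvalidBlockNumber when there is no neighbouring page, so BlockNumberIsValid() detects the end of a bucket chain.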
Example 1: _hash_addovflpage
/*
* _hash_addovflpage
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
*
* The caller must hold a pin, but no lock, on the metapage buffer.
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
* no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
* immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
_hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
{
Buffer ovflbuf;
Page page;
Page ovflpage;
HashPageOpaque pageopaque;
HashPageOpaque ovflopaque;
/* allocate and lock an empty overflow page */
ovflbuf = _hash_getovflpage(rel, metabuf);
/*
* Write-lock the tail page. It is okay to hold two buffer locks here
* since there cannot be anyone else contending for access to ovflbuf.
*/
_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
/* probably redundant... */
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
/* loop to find current tail page, in case someone else inserted too */
for (;;)
{
BlockNumber nextblkno;
page = BufferGetPage(buf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
nextblkno = pageopaque->hasho_nextblkno;
if (!BlockNumberIsValid(nextblkno))
break;
/* we assume we do not need to write the unmodified page */
_hash_relbuf(rel, buf);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
}
/* now that we have correct backlink, initialize new overflow page */
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
ovflopaque->hasho_nextblkno = InvalidBlockNumber;
ovflopaque->hasho_bucket = pageopaque->hasho_bucket;
ovflopaque->hasho_flag = LH_OVERFLOW_PAGE;
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
MarkBufferDirty(ovflbuf);
/* logically chain overflow page to previous page */
pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
_hash_wrtbuf(rel, buf);
return ovflbuf;
}
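A minimal caller-side sketch of how this function might be used (hypothetical; extend_chain_and_insert is a made-up name, the 3-argument _hash_addovflpage signature matches Example 1, and PageAddItem/elog are PostgreSQL page and error APIs): an inserter that has reached the tail of a bucket chain with no free space extends the chain and places the tuple on the fresh overflow page.
/*
 * Hypothetical sketch, not taken from the example above.  'buf' is the
 * write-locked tail page of the bucket chain; 'itemsz' is assumed to be
 * already MAXALIGN'd by the caller.
 */
static void
extend_chain_and_insert(Relation rel, Buffer metabuf, Buffer buf,
						IndexTuple itup, Size itemsz)
{
	Page		ovflpage;

	/* _hash_addovflpage expects 'buf' pinned but not locked on entry */
	_hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);

	/* returns the new overflow page pinned and write-locked, guaranteed empty */
	buf = _hash_addovflpage(rel, metabuf, buf);
	ovflpage = BufferGetPage(buf);

	/* an empty overflow page always has room for one tuple */
	if (PageAddItem(ovflpage, (Item) itup, itemsz,
					InvalidOffsetNumber, false, false) == InvalidOffsetNumber)
		elog(ERROR, "failed to add index item");

	_hash_wrtbuf(rel, buf);		/* write the page and release lock + pin */
}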
Example 2: _hash_readnext
/*
* Advance to next page in a bucket, if any.
*/
static void
_hash_readnext(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
BlockNumber blkno;
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
}
}
Example 3: _hash_readnext
/*
* Advance to next page in a bucket, if any.
*/
static void
_hash_readnext(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
BlockNumber blkno;
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp);
*bufp = InvalidBuffer;
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
}
}
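A hedged sketch of how a caller might walk an entire bucket with this helper (walk_bucket_chain is a hypothetical name; since _hash_readnext is static in hashsearch.c, such a caller would live in the same file). The loop terminates because _hash_readnext leaves *bufp set to InvalidBuffer once hasho_nextblkno is no longer a valid block number.
/*
 * Hypothetical sketch: visit every page of a bucket chain.  Relies on
 * _hash_readnext() setting *bufp to InvalidBuffer when
 * BlockNumberIsValid(hasho_nextblkno) is false.
 */
static void
walk_bucket_chain(Relation rel, Buffer buf, Page page, HashPageOpaque opaque)
{
	while (BufferIsValid(buf))
	{
		/* ... inspect the index tuples on 'page' here ... */

		/* releases the current buffer; loads the next overflow page, if any */
		_hash_readnext(rel, &buf, &page, &opaque);
	}
}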
Example 4: _hash_readprev
/*
* Advance to previous page in a bucket, if any.
*/
static void
_hash_readprev(Relation rel,
Buffer *bufp, Page *pagep, HashPageOpaque *opaquep)
{
BlockNumber blkno;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
blkno = (*opaquep)->hasho_prevblkno;
_hash_relbuf(rel, *bufp);
*bufp = InvalidBuffer;
/* check for interrupts while we're not holding any buffer lock */
CHECK_FOR_INTERRUPTS();
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
_hash_checkpage(rel, *bufp, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
*pagep = BufferGetPage(*bufp);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
}
}
Example 5: _hash_freeovflpage
/*
* _hash_freeovflpage() -
*
* Remove this overflow page from its bucket's chain, and mark the page as
* free. On entry, ovflbuf is write-locked; it is released before exiting.
*
* Since this function is invoked in VACUUM, we provide an access strategy
* parameter that controls fetches of the bucket pages.
*
* Returns the block number of the page that followed the given page
* in the bucket, or InvalidBlockNumber if no following page.
*
* NB: caller must not hold lock on metapage, nor on either page that's
* adjacent in the bucket chain. The caller had better hold exclusive lock
* on the bucket, too.
*/
BlockNumber
_hash_freeovflpage(Relation rel, Buffer ovflbuf,
BufferAccessStrategy bstrategy)
{
HashMetaPage metap;
Buffer metabuf;
Buffer mapbuf;
BlockNumber ovflblkno;
BlockNumber prevblkno;
BlockNumber blkno;
BlockNumber nextblkno;
HashPageOpaque ovflopaque;
Page ovflpage;
Page mappage;
uint32 *freep;
uint32 ovflbitno;
int32 bitmappage,
bitmapbit;
/*CS3223*/
int index;
int bitIndexInElement;
uint32 ovflElement;
uint32 temp, temp2;
int i;
BlockNumber nextblkno_temp;
HashPageOpaque pageopaque;
Page page;
uint32 *tempPointer;
Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
/* Get information from the doomed page */
_hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
ovflblkno = BufferGetBlockNumber(ovflbuf);
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
nextblkno = ovflopaque->hasho_nextblkno;
prevblkno = ovflopaque->hasho_prevblkno;
bucket = ovflopaque->hasho_bucket;
/*CS3223*/
/* find the length of the bucket chain */
i = 0;						/* chain-length counter */
while (i >= 0)
{
//nextblkno_temp;
page = BufferGetPage(ovflbuf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
nextblkno_temp = pageopaque->hasho_nextblkno;
if (!BlockNumberIsValid(nextblkno_temp))
break;
/* we assume we do not need to write the unmodified page */
_hash_relbuf(rel, ovflbuf);
ovflbuf = _hash_getbuf(rel, nextblkno_temp, HASH_WRITE, LH_OVERFLOW_PAGE);
/*CS3223*/
i++;
}
/*
* Zero the page for debugging's sake; then write and release it. (Note:
* if we failed to zero the page here, we'd have problems with the Assert
* in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
_hash_wrtbuf(rel, ovflbuf);
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must fix
* up the bucket chain members behind and ahead of the overflow page being
* deleted. No concurrency issues since we hold exclusive lock on the
* entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
{
Buffer prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
Page prevpage = BufferGetPage(prevbuf);
//.........part of this code omitted here.........
Example 6: _hash_freeovflpage
/*
* _hash_freeovflpage() -
*
* Remove this overflow page from its bucket's chain, and mark the page as
* free. On entry, ovflbuf is write-locked; it is released before exiting.
*
* Since this function is invoked in VACUUM, we provide an access strategy
* parameter that controls fetches of the bucket pages.
*
* Returns the block number of the page that followed the given page
* in the bucket, or InvalidBlockNumber if no following page.
*
* NB: caller must not hold lock on metapage, nor on either page that's
* adjacent in the bucket chain. The caller had better hold exclusive lock
* on the bucket, too.
*/
BlockNumber
_hash_freeovflpage(Relation rel, Buffer ovflbuf,
BufferAccessStrategy bstrategy)
{
HashMetaPage metap;
Buffer metabuf;
Buffer mapbuf;
BlockNumber ovflblkno;
BlockNumber prevblkno;
BlockNumber blkno;
BlockNumber nextblkno;
HashPageOpaque ovflopaque;
Page ovflpage;
Page mappage;
uint32 *freep;
uint32 ovflbitno;
int32 bitmappage,
bitmapbit;
Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
/* Get information from the doomed page */
_hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
ovflblkno = BufferGetBlockNumber(ovflbuf);
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
nextblkno = ovflopaque->hasho_nextblkno;
prevblkno = ovflopaque->hasho_prevblkno;
bucket = ovflopaque->hasho_bucket;
/*
* Zero the page for debugging's sake; then write and release it. (Note:
* if we failed to zero the page here, we'd have problems with the Assert
* in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
_hash_wrtbuf(rel, ovflbuf);
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must fix
* up the bucket chain members behind and ahead of the overflow page being
* deleted. No concurrency issues since we hold exclusive lock on the
* entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
{
Buffer prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
Page prevpage = BufferGetPage(prevbuf);
HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
Assert(prevopaque->hasho_bucket == bucket);
prevopaque->hasho_nextblkno = nextblkno;
_hash_wrtbuf(rel, prevbuf);
}
if (BlockNumberIsValid(nextblkno))
{
Buffer nextbuf = _hash_getbuf_with_strategy(rel,
nextblkno,
HASH_WRITE,
LH_OVERFLOW_PAGE,
bstrategy);
Page nextpage = BufferGetPage(nextbuf);
HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);
Assert(nextopaque->hasho_bucket == bucket);
nextopaque->hasho_prevblkno = prevblkno;
_hash_wrtbuf(rel, nextbuf);
}
/* Note: bstrategy is intentionally not used for metapage and bitmap */
/* Read the metapage so we can determine which bitmap page to use */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
/* Identify which bit to set */
ovflbitno = blkno_to_bitno(metap, ovflblkno);
bitmappage = ovflbitno >> BMPG_SHIFT(metap);
bitmapbit = ovflbitno & BMPG_MASK(metap);
//.........part of this code omitted here.........
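The bit identified above eventually has to be cleared in the corresponding bitmap page so the overflow page can be recycled. The following is a hedged, standalone sketch of that step (clear_ovfl_bitmap_bit is a made-up name; it is not the omitted portion of the example, and it assumes the hash AM's bitmap helpers HashPageGetBitmap, ISSET and CLRBIT together with the bitmappage/bitmapbit values computed as above).
/*
 * Illustrative sketch: clear the free-space bitmap bit for a released
 * overflow page.
 */
static void
clear_ovfl_bitmap_bit(Relation rel, HashMetaPage metap,
					  int32 bitmappage, int32 bitmapbit)
{
	BlockNumber mapblkno = metap->hashm_mapp[bitmappage];	/* bitmap page's block */
	Buffer		mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
	Page		mappage = BufferGetPage(mapbuf);
	uint32	   *freep = HashPageGetBitmap(mappage);

	Assert(ISSET(freep, bitmapbit));	/* the page was marked as in use */
	CLRBIT(freep, bitmapbit);			/* mark it free for reuse */
	_hash_wrtbuf(rel, mapbuf);			/* write the bitmap page and release */
}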
Example 7: _hash_first
//.........part of this code omitted here.........
HashPageOpaque opaque;
HashMetaPage metap;
IndexTuple itup;
ItemPointer current;
OffsetNumber offnum;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
pgstat_count_index_scan(rel);
current = &(scan->currentItemData);
ItemPointerSetInvalid(current);
/*
* We do not support hash scans with no index qualification, because we
* would have to read the whole index rather than just one bucket. That
* creates a whole raft of problems, since we haven't got a practical way
* to lock all the buckets against splits or compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("hash indexes do not support whole-index scans")));
/*
* If the constant in the index qual is NULL, assume it cannot match any
* items in the index.
*/
if (scan->keyData[0].sk_flags & SK_ISNULL)
return false;
/*
* Okay to compute the hash key. We want to do this before acquiring any
* locks, in case a user-defined hash function happens to be slow.
*/
hashkey = _hash_datum2hashkey(rel, scan->keyData[0].sk_argument);
/*
* Acquire shared split lock so we can compute the target bucket safely
* (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
/* Read the metapage */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = (HashMetaPage) BufferGetPage(metabuf);
/*
* Compute the target bucket number, and convert to block number.
*/
bucket = _hash_hashkey2bucket(hashkey,
metap->hashm_maxbucket,
metap->hashm_highmask,
metap->hashm_lowmask);
blkno = BUCKET_TO_BLKNO(metap, bucket);
/* done with the metapage */
_hash_relbuf(rel, metabuf);
/*
* Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
_hash_droplock(rel, 0, HASH_SHARE);
/* Update scan opaque state to show we have lock on the bucket */
so->hashso_bucket = bucket;
so->hashso_bucket_valid = true;
so->hashso_bucket_blkno = blkno;
/* Fetch the primary bucket page for the bucket */
buf = _hash_getbuf(rel, blkno, HASH_READ);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
/* If a backwards scan is requested, move to the end of the chain */
if (ScanDirectionIsBackward(dir))
{
while (BlockNumberIsValid(opaque->hasho_nextblkno))
_hash_readnext(rel, &buf, &page, &opaque);
}
/* Now find the first tuple satisfying the qualification */
if (!_hash_step(scan, &buf, dir))
return false;
/* if we're here, _hash_step found a valid tuple */
offnum = ItemPointerGetOffsetNumber(current);
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
scan->xs_ctup.t_self = itup->t_tid;
return true;
}
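The bucket computation above relies on _hash_hashkey2bucket; a hedged paraphrase of its logic is shown below (the real function lives in hashutil.c, and hashkey_to_bucket_sketch is a made-up name).
/*
 * Paraphrased sketch of _hash_hashkey2bucket's logic (approximate): mask the
 * hash key with the current "high" mask; if that names a bucket that has not
 * been created yet, fall back to the "low" mask, which names its ancestor.
 */
static Bucket
hashkey_to_bucket_sketch(uint32 hashkey, uint32 maxbucket,
						 uint32 highmask, uint32 lowmask)
{
	Bucket		bucket = hashkey & highmask;

	if (bucket > maxbucket)
		bucket = bucket & lowmask;

	return bucket;
}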
Example 8: _hash_splitbucket
/*
* _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
*
* We are splitting a bucket that consists of a base bucket page and zero
* or more overflow (bucket chain) pages. We must relocate tuples that
* belong in the new bucket, and compress out any free space in the old
* bucket.
*
* The caller must hold exclusive locks on both buckets to ensure that
* no one else is trying to access them (see README).
*
* The caller must hold a pin, but no lock, on the metapage buffer.
* The buffer is returned in the same state. (The metapage is only
* touched if it becomes necessary to add or remove overflow pages.)
*/
static void
_hash_splitbucket(Relation rel,
Buffer metabuf,
Bucket obucket,
Bucket nbucket,
BlockNumber start_oblkno,
BlockNumber start_nblkno,
uint32 maxbucket,
uint32 highmask,
uint32 lowmask)
{
Bucket bucket;
Buffer obuf;
Buffer nbuf;
BlockNumber oblkno;
BlockNumber nblkno;
bool null;
Datum datum;
HashItem hitem;
HashPageOpaque oopaque;
HashPageOpaque nopaque;
IndexTuple itup;
Size itemsz;
OffsetNumber ooffnum;
OffsetNumber noffnum;
OffsetNumber omaxoffnum;
Page opage;
Page npage;
TupleDesc itupdesc = RelationGetDescr(rel);
/*
* It should be okay to simultaneously write-lock pages from each
* bucket, since no one else can be trying to acquire buffer lock
* on pages of either bucket.
*/
oblkno = start_oblkno;
nblkno = start_nblkno;
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
opage = BufferGetPage(obuf);
npage = BufferGetPage(nbuf);
_hash_checkpage(rel, opage, LH_BUCKET_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
/* initialize the new bucket's primary page */
_hash_pageinit(npage, BufferGetPageSize(nbuf));
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
nopaque->hasho_prevblkno = InvalidBlockNumber;
nopaque->hasho_nextblkno = InvalidBlockNumber;
nopaque->hasho_bucket = nbucket;
nopaque->hasho_flag = LH_BUCKET_PAGE;
nopaque->hasho_filler = HASHO_FILL;
/*
* Partition the tuples in the old bucket between the old bucket and the
* new bucket, advancing along the old bucket's overflow bucket chain
* and adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
/*
* at each iteration through this loop, each of these variables
* should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
*/
/* check if we're at the end of the page */
if (ooffnum > omaxoffnum)
{
/* at end of page, but check for an(other) overflow page */
oblkno = oopaque->hasho_nextblkno;
if (!BlockNumberIsValid(oblkno))
break;
/*
* we ran out of tuples on this particular page, but we
* have more overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
opage = BufferGetPage(obuf);
_hash_checkpage(rel, opage, LH_OVERFLOW_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
//.........part of this code omitted here.........
Example 9: hashbucketcleanup
//.........part of this code omitted here.........
}
if (kill_tuple)
{
/* mark the item for deletion */
deletable[ndeletable++] = offno;
}
else
{
/* we're keeping it, so count it */
if (num_index_tuples)
*num_index_tuples += 1;
}
}
/* retain the pin on primary bucket page till end of bucket scan */
if (blkno == bucket_blkno)
retain_pin = true;
else
retain_pin = false;
blkno = opaque->hasho_nextblkno;
/*
* Apply deletions, advance to next page and write page if needed.
*/
if (ndeletable > 0)
{
PageIndexMultiDelete(page, deletable, ndeletable);
bucket_dirty = true;
curr_page_dirty = true;
}
/* bail out if there are no more pages to scan. */
if (!BlockNumberIsValid(blkno))
break;
next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
LH_OVERFLOW_PAGE,
bstrategy);
/*
* release the lock on previous page after acquiring the lock on next
* page
*/
if (curr_page_dirty)
{
if (retain_pin)
_hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);
else
_hash_wrtbuf(rel, buf);
curr_page_dirty = false;
}
else if (retain_pin)
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
else
_hash_relbuf(rel, buf);
buf = next_buf;
}
/*
* lock the bucket page to clear the garbage flag and squeeze the bucket.
* if the current buffer is same as bucket buffer, then we already have
* lock on bucket page.
*/
if (buf != bucket_buf)
{
_hash_relbuf(rel, buf);
_hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
}
/*
* Clear the garbage flag from bucket after deleting the tuples that are
* moved by split. We purposefully clear the flag before squeeze bucket,
* so that after restart, vacuum shouldn't again try to delete the moved
* by split tuples.
*/
if (split_cleanup)
{
HashPageOpaque bucket_opaque;
Page page;
page = BufferGetPage(bucket_buf);
bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
}
/*
* If we have deleted anything, try to compact free space. For squeezing
* the bucket, we must have a cleanup lock, else it can impact the
* ordering of tuples for a scan that has started before it.
*/
if (bucket_dirty && IsBufferCleanupOK(bucket_buf))
_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
bstrategy);
else
_hash_chgbufaccess(rel, bucket_buf, HASH_WRITE, HASH_NOLOCK);
}
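The per-page deletion above follows a common collect-then-delete pattern: gather the offsets of dead items first, then remove them in a single PageIndexMultiDelete call so the page is compacted only once. A simplified, hedged sketch is below (delete_dead_items is a made-up name and item_is_dead is a hypothetical predicate standing in for the kill_tuple decision).
/*
 * Simplified sketch of the collect-then-delete pattern used above.
 * item_is_dead() is a hypothetical placeholder for the kill_tuple logic.
 */
static bool
delete_dead_items(Page page)
{
	OffsetNumber deletable[MaxOffsetNumber];
	int			ndeletable = 0;
	OffsetNumber offno;
	OffsetNumber maxoffno = PageGetMaxOffsetNumber(page);

	for (offno = FirstOffsetNumber;
		 offno <= maxoffno;
		 offno = OffsetNumberNext(offno))
	{
		if (item_is_dead(page, offno))			/* hypothetical check */
			deletable[ndeletable++] = offno;
	}

	if (ndeletable > 0)
		PageIndexMultiDelete(page, deletable, ndeletable);	/* one compaction */

	return ndeletable > 0;		/* caller marks the buffer dirty if true */
}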
Example 10: _hash_addovflpage
/*
* _hash_addovflpage
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore) if not asked to retain. The pin will be retained only for the
* primary bucket. The returned overflow page will be pinned and
* write-locked; it is guaranteed to be empty.
*
* The caller must hold a pin, but no lock, on the metapage buffer.
* That buffer is returned in the same state.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
* immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
_hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
{
Buffer ovflbuf;
Page page;
Page ovflpage;
HashPageOpaque pageopaque;
HashPageOpaque ovflopaque;
HashMetaPage metap;
Buffer mapbuf = InvalidBuffer;
Buffer newmapbuf = InvalidBuffer;
BlockNumber blkno;
uint32 orig_firstfree;
uint32 splitnum;
uint32 *freep = NULL;
uint32 max_ovflpg;
uint32 bit;
uint32 bitmap_page_bit;
uint32 first_page;
uint32 last_bit;
uint32 last_page;
uint32 i,
j;
bool page_found = false;
/*
* Write-lock the tail page. Here, we need to maintain locking order such
* that, first acquire the lock on tail page of bucket, then on meta page
* to find and lock the bitmap page and if it is found, then lock on meta
* page is released, then finally acquire the lock on new overflow buffer.
* We need this locking order to avoid deadlock with backends that are
* doing inserts.
*
* Note: We could have avoided locking many buffers here if we made two
* WAL records for acquiring an overflow page (one to allocate an overflow
* page and another to add it to overflow bucket chain). However, doing
* so can leak an overflow page, if the system crashes after allocation.
* Needless to say, it is better to have a single record from a
* performance point of view as well.
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* probably redundant... */
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
/* loop to find current tail page, in case someone else inserted too */
for (;;)
{
BlockNumber nextblkno;
page = BufferGetPage(buf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
nextblkno = pageopaque->hasho_nextblkno;
if (!BlockNumberIsValid(nextblkno))
break;
/* we assume we do not need to write the unmodified page */
if (retain_pin)
{
/* pin will be retained only for the primary bucket page */
Assert((pageopaque->hasho_flag & LH_PAGE_TYPE) == LH_BUCKET_PAGE);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
}
else
_hash_relbuf(rel, buf);
retain_pin = false;
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
}
/* Get exclusive lock on the meta page */
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
_hash_checkpage(rel, metabuf, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
/* start search at hashm_firstfree */
orig_firstfree = metap->hashm_firstfree;
first_page = orig_firstfree >> BMPG_SHIFT(metap);
//.........part of this code omitted here.........
Example 11: btree_xlog_vacuum
static void
btree_xlog_vacuum(XLogReaderState *record)
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
Page page;
BTPageOpaque opaque;
#ifdef UNUSED
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
* This section of code is thought to be no longer needed, after analysis
* of the calling paths. It is retained to allow the code to be reinstated
* if a flaw is revealed in that thinking.
*
* If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra
* work in all cases, whereas we now avoid that work in most cases. If
* lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan.
*
* Avoiding this extra work is important since it requires us to touch
* every page in the index, so is an O(N) operation. Worse, it is an
* operation performed in the foreground during redo, so it delays
* replication directly.
*
* If queries might be active then we need to ensure every leaf page is
* unpinned between the lastBlockVacuumed and the current block, if there
* are any. This prevents replay of the VACUUM from reaching the stage of
* removing heap tuples while there could still be indexscans "in flight"
* to those particular tuples for those scans which could be confused by
* finding new tuples at the old TID locations (see nbtree/README).
*
* It might be worth checking if there are actually any backends running;
* if not, we could just skip this.
*
* Since VACUUM can visit leaf pages out-of-order, it might issue records
* with lastBlockVacuumed >= block; that's not an error, it just means
* nothing to do now.
*
* Note: since we touch all pages in the range, we will lock non-leaf
* pages, and also any empty (all-zero) pages that may be in the index. It
* doesn't seem worth the complexity to avoid that. But it's important
* that HotStandbyActiveInReplay() will not return true if the database
* isn't yet consistent; so we need not fear reading still-corrupt blocks
* here during crash recovery.
*/
if (HotStandbyActiveInReplay() && BlockNumberIsValid(xlrec->lastBlockVacuumed))
{
RelFileNode thisrnode;
BlockNumber thisblkno;
BlockNumber blkno;
XLogRecGetBlockTag(record, 0, &thisrnode, NULL, &thisblkno);
for (blkno = xlrec->lastBlockVacuumed + 1; blkno < thisblkno; blkno++)
{
/*
* We use RBM_NORMAL_NO_LOG mode because it's not an error
* condition to see all-zero pages. The original btvacuumpage
* scan would have skipped over all-zero pages, noting them in FSM
* but not bothering to initialize them just yet; so we mustn't
* throw an error here. (We could skip acquiring the cleanup lock
* if PageIsNew, but it's probably not worth the cycles to test.)
*
* XXX we don't actually need to read the block, we just need to
* confirm it is unpinned. If we had a special call into the
* buffer manager we could optimise this so that if the block is
* not in shared_buffers we confirm it as unpinned. Optimizing
* this is now moot, since in most cases we avoid the scan.
*/
buffer = XLogReadBufferExtended(thisrnode, MAIN_FORKNUM, blkno,
RBM_NORMAL_NO_LOG);
if (BufferIsValid(buffer))
{
LockBufferForCleanup(buffer);
UnlockReleaseBuffer(buffer);
}
}
}
#endif
/*
* Like in btvacuumpage(), we need to take a cleanup lock on every leaf
* page. See nbtree/README for details.
*/
if (XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer)
== BLK_NEEDS_REDO)
{
char *ptr;
Size len;
ptr = XLogRecGetBlockData(record, 0, &len);
page = (Page) BufferGetPage(buffer);
if (len > 0)
{
OffsetNumber *unused;
//.........part of this code omitted here.........
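For orientation, a BLK_NEEDS_REDO result is normally followed by the standard redo idiom: apply the logged change, stamp the page with the record's LSN, mark the buffer dirty, and release it. The sketch below is a generic, hedged illustration of that idiom (generic_xlog_redo_sketch is a made-up name and this is not the omitted portion of the example).
/*
 * Generic redo-handler sketch (illustrative only).
 */
static void
generic_xlog_redo_sketch(XLogReaderState *record)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	Buffer		buffer;

	if (XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer)
		== BLK_NEEDS_REDO)
	{
		Page		page = BufferGetPage(buffer);

		/* ... apply the change carried by the WAL record to 'page' ... */

		PageSetLSN(page, lsn);		/* page now reflects this WAL record */
		MarkBufferDirty(buffer);
	}
	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}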
Example 12: _hash_squeezebucket
/*
* _hash_squeezebucket(rel, bucket)
*
* Try to squeeze the tuples onto pages occurring earlier in the
* bucket chain in an attempt to free overflow pages. When we start
* the "squeezing", the page from which we start taking tuples (the
* "read" page) is the last bucket in the bucket chain and the page
* onto which we start squeezing tuples (the "write" page) is the
* first page in the bucket chain. The read page works backward and
* the write page works forward; the procedure terminates when the
* read page and write page are the same page.
*
* At completion of this procedure, it is guaranteed that all pages in
* the bucket are nonempty, unless the bucket is totally empty (in
* which case all overflow pages will be freed). The original implementation
* required that to be true on entry as well, but it's a lot easier for
* callers to leave empty overflow pages and let this guy clean it up.
*
* Caller must hold exclusive lock on the target bucket. This allows
* us to safely lock multiple pages in the bucket.
*/
void
_hash_squeezebucket(Relation rel,
Bucket bucket,
BlockNumber bucket_blkno)
{
Buffer wbuf;
Buffer rbuf = 0;
BlockNumber wblkno;
BlockNumber rblkno;
Page wpage;
Page rpage;
HashPageOpaque wopaque;
HashPageOpaque ropaque;
OffsetNumber woffnum;
OffsetNumber roffnum;
IndexTuple itup;
Size itemsz;
/*
* start squeezing into the base bucket page.
*/
wblkno = bucket_blkno;
wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
_hash_checkpage(rel, wbuf, LH_BUCKET_PAGE);
wpage = BufferGetPage(wbuf);
wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
/*
* if there aren't any overflow pages, there's nothing to squeeze.
*/
if (!BlockNumberIsValid(wopaque->hasho_nextblkno))
{
_hash_relbuf(rel, wbuf);
return;
}
/*
* find the last page in the bucket chain by starting at the base bucket
* page and working forward.
*/
ropaque = wopaque;
do
{
rblkno = ropaque->hasho_nextblkno;
if (ropaque != wopaque)
_hash_relbuf(rel, rbuf);
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
_hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
rpage = BufferGetPage(rbuf);
ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
Assert(ropaque->hasho_bucket == bucket);
} while (BlockNumberIsValid(ropaque->hasho_nextblkno));
/*
* squeeze the tuples.
*/
roffnum = FirstOffsetNumber;
for (;;)
{
/* this test is needed in case page is empty on entry */
if (roffnum <= PageGetMaxOffsetNumber(rpage))
{
itup = (IndexTuple) PageGetItem(rpage,
PageGetItemId(rpage, roffnum));
itemsz = IndexTupleDSize(*itup);
itemsz = MAXALIGN(itemsz);
/*
* Walk up the bucket chain, looking for a page big enough for
* this item. Exit if we reach the read page.
*/
while (PageGetFreeSpace(wpage) < itemsz)
{
Assert(!PageIsEmpty(wpage));
wblkno = wopaque->hasho_nextblkno;
Assert(BlockNumberIsValid(wblkno));
_hash_wrtbuf(rel, wbuf);
//.........part of this code omitted here.........
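A hedged sketch of the basic move the squeeze loop performs: copy one tuple from the "read" page to the "write" page, then delete it from the read page. The real code above also handles advancing the write page, WAL logging, and buffer release; move_one_tuple is a made-up name and this is not the elided code.
/*
 * Illustrative sketch of moving a single index tuple during a squeeze.
 */
static bool
move_one_tuple(Page rpage, OffsetNumber roffnum, Page wpage)
{
	IndexTuple	itup = (IndexTuple) PageGetItem(rpage,
												PageGetItemId(rpage, roffnum));
	Size		itemsz = MAXALIGN(IndexTupleDSize(*itup));

	if (PageGetFreeSpace(wpage) < itemsz)
		return false;			/* caller must advance the write page first */

	if (PageAddItem(wpage, (Item) itup, itemsz,
					InvalidOffsetNumber, false, false) == InvalidOffsetNumber)
		return false;

	PageIndexTupleDelete(rpage, roffnum);	/* remove it from the read page */
	return true;
}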
Example 13: _hash_doinsert
/*
* _hash_doinsert() -- Handle insertion of a single index tuple.
*
* This routine is called by the public interface routines, hashbuild
* and hashinsert. By here, itup is completely filled in.
*/
void
_hash_doinsert(Relation rel, IndexTuple itup)
{
Buffer buf;
Buffer metabuf;
HashMetaPage metap;
BlockNumber blkno;
Page page;
HashPageOpaque pageopaque;
Size itemsz;
bool do_expand;
uint32 hashkey;
Bucket bucket;
/*
* Get the hash key for the item (it's stored in the index tuple itself).
*/
hashkey = _hash_get_indextuple_hashkey(itup);
/* compute item size too */
itemsz = IndexTupleDSize(*itup);
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
* need to be consistent */
/*
* Acquire shared split lock so we can compute the target bucket safely
* (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
/* Read the metapage */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
/*
* Check whether the item can fit on a hash page at all. (Eventually, we
* ought to try to apply TOAST methods if not.) Note that at this point,
* itemsz doesn't include the ItemId.
*
* XXX this is useless code if we are only storing hash keys.
*/
if (itemsz > HashMaxItemSize((Page) metap))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %lu exceeds hash maximum %lu",
(unsigned long) itemsz,
(unsigned long) HashMaxItemSize((Page) metap)),
errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Compute the target bucket number, and convert to block number.
*/
bucket = _hash_hashkey2bucket(hashkey,
metap->hashm_maxbucket,
metap->hashm_highmask,
metap->hashm_lowmask);
blkno = BUCKET_TO_BLKNO(metap, bucket);
/* release lock on metapage, but keep pin since we'll need it again */
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
* Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
_hash_droplock(rel, 0, HASH_SHARE);
/* Fetch the primary bucket page for the bucket */
buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);
page = BufferGetPage(buf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(pageopaque->hasho_bucket == bucket);
/* Do the insertion */
while (PageGetFreeSpace(page) < itemsz)
{
/*
* no space on this page; check for an overflow page
*/
BlockNumber nextblkno = pageopaque->hasho_nextblkno;
if (BlockNumberIsValid(nextblkno))
{
/*
* ovfl page exists; go get it. if it doesn't have room, we'll
* find out next pass through the loop test above.
*/
_hash_relbuf(rel, buf);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
}
else
//.........part of this code omitted here.........
Example 14: _hash_finish_split
/*
* _hash_finish_split() -- Finish the previously interrupted split operation
*
* To complete the split operation, we form the hash table of TIDs in new
* bucket which is then used by split operation to skip tuples that are
* already moved before the split operation was previously interrupted.
*
* The caller must hold a pin, but no lock, on the metapage and old bucket's
* primary page buffer. The buffers are returned in the same state. (The
* metapage is only touched if it becomes necessary to add or remove overflow
* pages.)
*/
void
_hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
uint32 maxbucket, uint32 highmask, uint32 lowmask)
{
HASHCTL hash_ctl;
HTAB *tidhtab;
Buffer bucket_nbuf = InvalidBuffer;
Buffer nbuf;
Page npage;
BlockNumber nblkno;
BlockNumber bucket_nblkno;
HashPageOpaque npageopaque;
Bucket nbucket;
bool found;
/* Initialize hash tables used to track TIDs */
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(ItemPointerData);
hash_ctl.entrysize = sizeof(ItemPointerData);
hash_ctl.hcxt = CurrentMemoryContext;
tidhtab =
hash_create("bucket ctids",
256, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
/*
* Scan the new bucket and build hash table of TIDs
*/
for (;;)
{
OffsetNumber noffnum;
OffsetNumber nmaxoffnum;
nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
/* remember the primary bucket buffer to acquire cleanup lock on it. */
if (nblkno == bucket_nblkno)
bucket_nbuf = nbuf;
npage = BufferGetPage(nbuf);
npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
/* Scan each tuple in new page */
nmaxoffnum = PageGetMaxOffsetNumber(npage);
for (noffnum = FirstOffsetNumber;
noffnum <= nmaxoffnum;
noffnum = OffsetNumberNext(noffnum))
{
IndexTuple itup;
/* Fetch the item's TID and insert it in hash table. */
itup = (IndexTuple) PageGetItem(npage,
PageGetItemId(npage, noffnum));
(void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
Assert(!found);
}
nblkno = npageopaque->hasho_nextblkno;
/*
* release our write lock without modifying buffer and ensure to
* retain the pin on primary bucket.
*/
if (nbuf == bucket_nbuf)
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, nbuf);
/* Exit loop if no more overflow pages in new bucket */
if (!BlockNumberIsValid(nblkno))
break;
}
/*
* Conditionally get the cleanup lock on old and new buckets to perform
* the split operation. If we don't get the cleanup locks, silently give
* up and next insertion on old bucket will try again to complete the
* split.
*/
if (!ConditionalLockBufferForCleanup(obuf))
{
//.........part of this code omitted here.........
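The TID hash table built above is consulted when the split is resumed, so tuples that were already moved before the interruption can be skipped. A hedged sketch of that lookup using dynahash's hash_search() is shown below (tuple_already_moved is a made-up name and this is not the omitted portion of the example).
/*
 * Illustrative sketch: a tuple whose TID is already present in 'tidhtab'
 * was moved before the interruption and can be skipped.
 */
static bool
tuple_already_moved(HTAB *tidhtab, IndexTuple itup)
{
	bool		found;

	(void) hash_search(tidhtab, &itup->t_tid, HASH_FIND, &found);
	return found;
}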
Example 15: _hash_splitbucket
//.........part of this code omitted here.........
/* be tidy */
for (i = 0; i < nitups; i++)
pfree(itups[i]);
nitups = 0;
all_tups_size = 0;
/* chain to a new overflow page */
nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
npage = BufferGetPage(nbuf);
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
}
itups[nitups++] = new_itup;
all_tups_size += itemsz;
}
else
{
/*
* the tuple stays on this page, so nothing to do.
*/
Assert(bucket == obucket);
}
}
oblkno = oopaque->hasho_nextblkno;
/* retain the pin on the old primary bucket */
if (obuf == bucket_obuf)
LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, obuf);
/* Exit loop if no more overflow pages in old bucket */
if (!BlockNumberIsValid(oblkno))
{
/*
* Change the shared buffer state in critical section, otherwise
* any error could make it unrecoverable.
*/
START_CRIT_SECTION();
_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
MarkBufferDirty(nbuf);
/* log the split operation before releasing the lock */
log_split_page(rel, nbuf);
END_CRIT_SECTION();
if (nbuf == bucket_nbuf)
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, nbuf);
/* be tidy */
for (i = 0; i < nitups; i++)
pfree(itups[i]);
break;
}
/* Else, advance to next old page */
obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
opage = BufferGetPage(obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
}
/*