This article collects typical usage examples of the PageSetLSN function in C++. If you are unsure what exactly PageSetLSN does and how to use it, the curated code examples below should help; they are drawn from the PostgreSQL (and, for the bitmap index routines, Greenplum) source tree.
In the following, 15 code examples of the PageSetLSN function are shown, ordered by popularity by default.
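Before the individual examples, it helps to see the pattern nearly all of them share. The sketch below distills it using the PostgreSQL 9.5+ WAL API; RM_FOO_ID, XLOG_FOO_OP, and xl_foo_rec are hypothetical placeholders (not real PostgreSQL identifiers), while every function call is the real API. The essential ordering: enter a critical section, modify the page, MarkBufferDirty(), insert the WAL record, then stamp the page with the record's LSN so the buffer manager cannot write the page to disk before the corresponding WAL has been flushed.
#include "postgres.h"
#include "access/xloginsert.h"   /* XLogBeginInsert, XLogRegister*, XLogInsert */
#include "miscadmin.h"           /* START/END_CRIT_SECTION */
#include "storage/bufmgr.h"      /* MarkBufferDirty, BufferGetPage */
#include "storage/bufpage.h"     /* PageSetLSN */
#include "utils/rel.h"           /* RelationNeedsWAL */

/* Hypothetical placeholders -- substitute a real rmgr ID, opcode and record. */
#define RM_FOO_ID   0
#define XLOG_FOO_OP 0x10

typedef struct xl_foo_rec
{
    uint32      payload;         /* whatever the redo routine will need */
} xl_foo_rec;

static void
foo_log_page_change(Relation rel, Buffer buf)
{
    Page        page = BufferGetPage(buf);

    /* no ereport(ERROR) allowed between here and END_CRIT_SECTION() */
    START_CRIT_SECTION();

    /* ... apply the in-memory change to the page here ... */

    MarkBufferDirty(buf);        /* must happen before XLogInsert() */

    if (RelationNeedsWAL(rel))
    {
        xl_foo_rec  xlrec = {42};
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, sizeof(xl_foo_rec));
        XLogRegisterBuffer(0, buf, REGBUF_STANDARD);

        recptr = XLogInsert(RM_FOO_ID, XLOG_FOO_OP);

        /*
         * Stamp the page: the buffer manager will flush WAL up to this
         * LSN before it ever writes the page out (write-ahead rule).
         */
        PageSetLSN(page, recptr);
    }

    END_CRIT_SECTION();
}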
Example 1: nextval_internal
//......... part of the code omitted here .........
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
RelationGetRelationName(seqrel), buf)));
}
next = maxv;
}
else
next += incby;
}
fetch--;
if (rescnt < cache)
{
log--;
rescnt++;
last = next;
if (rescnt == 1) /* if it's first result - */
result = next; /* it's what to return */
}
}
log -= fetch; /* adjust for any unfetched numbers */
Assert(log >= 0);
/* save info in local cache */
elm->last = result; /* last returned number */
elm->cached = last; /* last fetched number */
elm->last_valid = true;
last_used_seq = elm;
/*
* If something needs to be WAL logged, acquire an xid, so this
* transaction's commit will trigger a WAL flush and wait for
* syncrep. It's sufficient to ensure the toplevel transaction has an xid,
* no need to assign xids to subxacts; that will already trigger an appropriate
* wait. (Have to do that here, so we're outside the critical section)
*/
if (logit && RelationNeedsWAL(seqrel))
GetTopTransactionId();
/* ready to change the on-disk (or really, in-buffer) tuple */
START_CRIT_SECTION();
/*
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
* This looks like a violation of the buffer update protocol, but it is in
* fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
*/
MarkBufferDirty(buf);
/* XLOG stuff */
if (logit && RelationNeedsWAL(seqrel))
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
/*
* We don't log the current state of the tuple, but rather the state
* as it would appear after "log" more fetches. This lets us skip
* that many future WAL records, at the cost that we lose those
* sequence values if we crash.
*/
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
/* set values that will be saved in xlog */
seq->last_value = next;
seq->is_called = true;
seq->log_cnt = 0;
xlrec.node = seqrel->rd_node;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqtuple.t_data, seqtuple.t_len);
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG);
PageSetLSN(page, recptr);
}
/* Now update sequence tuple to the intended final state */
seq->last_value = last; /* last fetched number */
seq->is_called = true;
seq->log_cnt = log; /* how much is logged */
END_CRIT_SECTION();
UnlockReleaseBuffer(buf);
relation_close(seqrel, NoLock);
return result;
}
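Example 1 shows the write side. What PageSetLSN actually buys you is enforced on the flush side: before the buffer manager writes any dirty page, it flushes WAL up to that page's LSN. Below is a simplified sketch of that rule (this is not the actual FlushBuffer code in bufmgr.c, which also deals with locking and checksums).
#include "postgres.h"
#include "access/xlog.h"         /* XLogFlush */
#include "storage/bufpage.h"     /* PageGetLSN */

static void
flush_page_sketch(Page page)
{
    XLogRecPtr  lsn = PageGetLSN(page);

    if (!XLogRecPtrIsInvalid(lsn))
        XLogFlush(lsn);          /* WAL first ... */

    /* ... only then may the data page itself be written to disk */
}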
Example 2: _hash_squeezebucket
//......... part of the code omitted here .........
MarkBufferDirty(rbuf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
xl_hash_move_page_contents xlrec;
xlrec.ntups = nitups;
xlrec.is_prim_bucket_same_wrt = (wbuf == bucket_buf) ? true : false;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHashMovePageContents);
/*
* bucket buffer needs to be registered to ensure that
* we can acquire a cleanup lock on it during replay.
*/
if (!xlrec.is_prim_bucket_same_wrt)
XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD | REGBUF_NO_IMAGE);
XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
XLogRegisterBufData(1, (char *) itup_offsets,
nitups * sizeof(OffsetNumber));
for (i = 0; i < nitups; i++)
XLogRegisterBufData(1, (char *) itups[i], tups_size[i]);
XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
XLogRegisterBufData(2, (char *) deletable,
ndeletable * sizeof(OffsetNumber));
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_MOVE_PAGE_CONTENTS);
PageSetLSN(BufferGetPage(wbuf), recptr);
PageSetLSN(BufferGetPage(rbuf), recptr);
}
END_CRIT_SECTION();
tups_moved = true;
}
/*
* release the lock on previous page after acquiring the lock
* on next page
*/
if (retain_pin)
LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, wbuf);
/* nothing more to do if we reached the read page */
if (rblkno == wblkno)
{
_hash_relbuf(rel, rbuf);
return;
}
wbuf = next_wbuf;
wpage = BufferGetPage(wbuf);
wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
Assert(wopaque->hasho_bucket == bucket);
retain_pin = false;
/* be tidy */
for (i = 0; i < nitups; i++)
Example 3: gistplacetopage
//......... part of the code omitted here .........
START_CRIT_SECTION();
/*
* must mark buffers dirty before XLogInsert, even though we'll still
* be changing their opaque fields below. set up right links.
*/
for (ptr = dist; ptr; ptr = ptr->next)
{
MarkBufferDirty(ptr->buffer);
GistPageGetOpaque(ptr->page)->rightlink = (ptr->next) ?
ptr->next->block.blkno : rrlink;
}
/* restore the split non-root page */
if (state->stack->blkno != GIST_ROOT_BLKNO)
{
PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
dist->page = BufferGetPage(dist->buffer);
}
if (!state->r->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData *rdata;
rdata = formSplitRdata(state->r->rd_node, state->stack->blkno,
is_leaf, &(state->key), dist);
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT, rdata);
for (ptr = dist; ptr; ptr = ptr->next)
{
PageSetLSN(ptr->page, recptr);
PageSetTLI(ptr->page, ThisTimeLineID);
}
}
else
{
for (ptr = dist; ptr; ptr = ptr->next)
{
PageSetLSN(ptr->page, XLogRecPtrForTemp);
}
}
/* set up NSN */
oldnsn = GistPageGetOpaque(dist->page)->nsn;
if (state->stack->blkno == GIST_ROOT_BLKNO)
/* for a root split, use the page LSN as the initial value */
oldnsn = PageGetLSN(dist->page);
for (ptr = dist; ptr; ptr = ptr->next)
{
/* set oldnsn only on the last page */
GistPageGetOpaque(ptr->page)->nsn = (ptr->next) ?
PageGetLSN(ptr->page) : oldnsn;
}
/*
* release buffers, if it was a root split then release all buffers
* because we created all of them
*/
ptr = (state->stack->blkno == GIST_ROOT_BLKNO) ? dist : dist->next;
for (; ptr; ptr = ptr->next)
UnlockReleaseBuffer(ptr->buffer);
Example 4: _bt_getroot
//......... part of the code omitted here .........
rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
rootopaque->btpo.level = 0;
rootopaque->btpo_cycleid = 0;
/* NO ELOG(ERROR) till meta is updated */
START_CRIT_SECTION();
metad->btm_root = rootblkno;
metad->btm_level = 0;
metad->btm_fastroot = rootblkno;
metad->btm_fastlevel = 0;
MarkBufferDirty(rootbuf);
MarkBufferDirty(metabuf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
xl_btree_newroot xlrec;
XLogRecPtr recptr;
XLogRecData rdata;
xlrec.node = rel->rd_node;
xlrec.rootblk = rootblkno;
xlrec.level = 0;
rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, &rdata);
PageSetLSN(rootpage, recptr);
PageSetTLI(rootpage, ThisTimeLineID);
PageSetLSN(metapg, recptr);
PageSetTLI(metapg, ThisTimeLineID);
}
END_CRIT_SECTION();
/*
* Send out relcache inval for metapage change (probably unnecessary
* here, but let's be safe).
*/
CacheInvalidateRelcache(rel);
/*
* swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
/* okay, metadata is correct, release lock on it */
_bt_relbuf(rel, metabuf);
}
else
{
rootblkno = metad->btm_fastroot;
Assert(rootblkno != P_NONE);
rootlevel = metad->btm_fastlevel;
/*
* Cache the metapage data for next time
Example 5: _hash_addovflpage
//......... part of the code omitted here .........
/* add the new bitmap page to the metapage's list of bitmaps */
metap->hashm_mapp[metap->hashm_nmaps] = BufferGetBlockNumber(newmapbuf);
metap->hashm_nmaps++;
metap->hashm_spares[splitnum]++;
MarkBufferDirty(metabuf);
}
/*
* for new overflow page, we don't need to explicitly set the bit in
* bitmap page, as by default that will be set to "in use".
*/
}
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
metap->hashm_firstfree = bit + 1;
MarkBufferDirty(metabuf);
}
/* initialize new overflow page */
ovflpage = BufferGetPage(ovflbuf);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
ovflopaque->hasho_nextblkno = InvalidBlockNumber;
ovflopaque->hasho_bucket = pageopaque->hasho_bucket;
ovflopaque->hasho_flag = LH_OVERFLOW_PAGE;
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
MarkBufferDirty(ovflbuf);
/* logically chain overflow page to previous page */
pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
MarkBufferDirty(buf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
xl_hash_add_ovfl_page xlrec;
xlrec.bmpage_found = page_found;
xlrec.bmsize = metap->hashm_bmsize;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHashAddOvflPage);
XLogRegisterBuffer(0, ovflbuf, REGBUF_WILL_INIT);
XLogRegisterBufData(0, (char *) &pageopaque->hasho_bucket, sizeof(Bucket));
XLogRegisterBuffer(1, buf, REGBUF_STANDARD);
if (BufferIsValid(mapbuf))
{
XLogRegisterBuffer(2, mapbuf, REGBUF_STANDARD);
XLogRegisterBufData(2, (char *) &bitmap_page_bit, sizeof(uint32));
}
if (BufferIsValid(newmapbuf))
XLogRegisterBuffer(3, newmapbuf, REGBUF_WILL_INIT);
XLogRegisterBuffer(4, metabuf, REGBUF_STANDARD);
XLogRegisterBufData(4, (char *) &metap->hashm_firstfree, sizeof(uint32));
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_ADD_OVFL_PAGE);
PageSetLSN(BufferGetPage(ovflbuf), recptr);
PageSetLSN(BufferGetPage(buf), recptr);
if (BufferIsValid(mapbuf))
PageSetLSN(BufferGetPage(mapbuf), recptr);
if (BufferIsValid(newmapbuf))
PageSetLSN(BufferGetPage(newmapbuf), recptr);
PageSetLSN(BufferGetPage(metabuf), recptr);
}
END_CRIT_SECTION();
if (retain_pin)
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, buf);
if (BufferIsValid(mapbuf))
_hash_relbuf(rel, mapbuf);
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
if (BufferIsValid(newmapbuf))
_hash_relbuf(rel, newmapbuf);
return ovflbuf;
}
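Example 5 also demonstrates the multi-buffer case: a single WAL record may cover several pages (here up to five), and every page the record actually modified must be stamped with the same recptr. Here is a minimal two-buffer sketch, reusing the hypothetical RM_FOO_ID/XLOG_FOO_OP placeholders from the opening sketch.
static void
log_two_page_change(Relation rel, Buffer bufa, Buffer bufb)
{
    /*
     * Assumes the caller has modified both pages, MarkBufferDirty()'d
     * them, and is still inside its critical section.
     */
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterBuffer(0, bufa, REGBUF_STANDARD);
        XLogRegisterBuffer(1, bufb, REGBUF_STANDARD);

        recptr = XLogInsert(RM_FOO_ID, XLOG_FOO_OP);

        /* one record, one LSN, set on every page it touched */
        PageSetLSN(BufferGetPage(bufa), recptr);
        PageSetLSN(BufferGetPage(bufb), recptr);
    }
}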
Example 6: heap_page_prune
//......... part of the code omitted here .........
OldestXmin,
&prstate);
}
/* Any error while applying the changes is critical */
START_CRIT_SECTION();
/* Have we found any prunable items? */
if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
{
/*
* Apply the planned item changes, then repair page fragmentation, and
* update the page's hint bit about whether it has free line pointers.
*/
heap_page_prune_execute(buffer,
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
prstate.nowunused, prstate.nunused);
/*
* Update the page's pd_prune_xid field to either zero, or the lowest
* XID of any soon-prunable tuple.
*/
((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
/*
* Also clear the "page is full" flag, since there's no point in
* repeating the prune/defrag process until something else happens to
* the page.
*/
PageClearFull(page);
MarkBufferDirty(buffer);
/*
* Emit a WAL HEAP_CLEAN record showing what we did
*/
if (RelationNeedsWAL(relation))
{
XLogRecPtr recptr;
recptr = log_heap_clean(relation, buffer,
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
prstate.nowunused, prstate.nunused,
prstate.latestRemovedXid);
PageSetLSN(BufferGetPage(buffer), recptr);
}
}
else
{
/*
* If we didn't prune anything, but have found a new value for the
* pd_prune_xid field, update it and mark the buffer dirty. This is
* treated as a non-WAL-logged hint.
*
* Also clear the "page is full" flag if it is set, since there's no
* point in repeating the prune/defrag process until something else
* happens to the page.
*/
if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
PageIsFull(page))
{
((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
PageClearFull(page);
MarkBufferDirtyHint(buffer, true);
}
}
END_CRIT_SECTION();
/*
* If requested, report the number of tuples reclaimed to pgstats. This is
* ndeleted minus ndead, because we don't want to count a now-DEAD root
* item as a deletion for this purpose.
*/
if (report_stats && ndeleted > prstate.ndead)
pgstat_update_heap_dead_tuples(relation, ndeleted - prstate.ndead);
*latestRemovedXid = prstate.latestRemovedXid;
/*
* XXX Should we update the FSM information of this page ?
*
* There are two schools of thought here. We may not want to update FSM
* information so that the page is not used for unrelated UPDATEs/INSERTs
* and any free space in this page will remain available for further
* UPDATEs in *this* page, thus improving chances for doing HOT updates.
*
* But for a large table and where a page does not receive further UPDATEs
* for a long time, we might waste this space by not updating the FSM
* information. The relation may get extended and fragmented further.
*
* One possibility is to leave "fillfactor" worth of space in this page
* and update FSM with the remaining space.
*/
return ndeleted;
}
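Note the contrast inside Example 6: the pruning branch WAL-logs its change and calls PageSetLSN, while the hint-only branch deliberately does not. A change that no WAL record covers must not advance the page LSN. A minimal sketch of that hint-only pattern:
#include "postgres.h"
#include "storage/bufmgr.h"      /* MarkBufferDirtyHint */
#include "storage/bufpage.h"

static void
set_prune_hint_sketch(Buffer buf, TransactionId new_prune_xid)
{
    Page        page = BufferGetPage(buf);

    ((PageHeader) page)->pd_prune_xid = new_prune_xid;
    PageClearFull(page);

    /*
     * No XLogInsert() and no PageSetLSN() here: MarkBufferDirtyHint()
     * internally handles the data-checksums/wal_log_hints cases where
     * even a hint change needs a full-page image.
     */
    MarkBufferDirtyHint(buf, true);
}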
Example 7: _bt_delitems_vacuum
/*
* Delete item(s) from a btree page during VACUUM.
*
* This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
* This routine assumes that the caller has pinned and locked the buffer.
* Also, the given itemnos *must* appear in increasing order in the array.
*
* We record VACUUMs and b-tree deletes differently in WAL. InHotStandby
* we need to be able to pin all of the blocks in the btree in physical
* order when replaying the effects of a VACUUM, just as we do for the
* original VACUUM itself. lastBlockVacuumed allows us to tell whether an
* intermediate range of blocks has had no changes at all by VACUUM,
* and so must be scanned anyway during replay. We always write a WAL record
* for the last block in the index, whether or not it contained any items
* to be removed. This allows us to scan right up to end of index to
* ensure correct locking.
*/
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems,
BlockNumber lastBlockVacuumed)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
/* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/* Fix the page */
if (nitems > 0)
PageIndexMultiDelete(page, itemnos, nitems);
/*
* We can clear the vacuum cycle ID since this page has certainly been
* processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DEAD items. This is not
* certainly true (there might be some that have recently been marked, but
* weren't included in our target-item list), but it will almost always be
* true and it doesn't seem worth an additional page scan to check it.
* Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
MarkBufferDirty(buf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
XLogRecData rdata[2];
xl_btree_vacuum xlrec_vacuum;
xlrec_vacuum.node = rel->rd_node;
xlrec_vacuum.block = BufferGetBlockNumber(buf);
xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;
rdata[0].data = (char *) &xlrec_vacuum;
rdata[0].len = SizeOfBtreeVacuum;
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &(rdata[1]);
/*
* The target-offsets array is not in the buffer, but pretend that it
* is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
{
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
}
else
{
rdata[1].data = NULL;
rdata[1].len = 0;
}
rdata[1].buffer = buf;
rdata[1].buffer_std = true;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
}
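Examples 3, 4, and 7, along with the bitmap examples below, predate PostgreSQL 9.5: they build XLogRecData chains by hand and also call PageSetTLI, which was removed in 9.3 when that page-header field was repurposed for checksums. For comparison, here is a hedged sketch of the same record under the 9.5+ API (this approximates the modern _bt_delitems_vacuum, but check the current source before relying on details):
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr      recptr;
        xl_btree_vacuum xlrec_vacuum;

        xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;

        XLogBeginInsert();
        XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
        XLogRegisterData((char *) &xlrec_vacuum, SizeOfBtreeVacuum);

        /* data registered to a buffer is elided when a full-page image is logged */
        if (nitems > 0)
            XLogRegisterBufData(0, (char *) itemnos,
                                nitems * sizeof(OffsetNumber));

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);

        PageSetLSN(page, recptr);    /* and no PageSetTLI() anymore */
    }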
Example 8: bitmap_xlog_insert_bitmap
static void
bitmap_xlog_insert_bitmap(bool redo, XLogRecPtr lsn, XLogRecord* record)
{
xl_bm_bitmappage *xlrec = (xl_bm_bitmappage*) XLogRecGetData(record);
Relation reln;
reln = XLogOpenRelation(xlrec->bm_node);
if (!RelationIsValid(reln))
return;
if (redo)
{
Buffer bitmapBuffer;
Page bitmapPage;
BMBitmapOpaque bitmapPageOpaque ;
bitmapBuffer = XLogReadBuffer(false, reln, xlrec->bm_bitmap_blkno);
if (!BufferIsValid(bitmapBuffer))
elog(PANIC, "bm_insert_redo: block unfound: %d",
xlrec->bm_bitmap_blkno);
bitmapPage = BufferGetPage(bitmapBuffer);
if (XLByteLT(PageGetLSN(bitmapPage), lsn))
{
bitmapPageOpaque = (BMBitmapOpaque) PageGetSpecialPointer(bitmapPage);
#ifdef BM_DEBUG
ereport(LOG, (errcode(LOG),
errmsg("call bitmap_xlog_insert_bitmap: redo=%d, blkno=%d, isOpaque=%d, words_used=%d, lastword=%d, next_blkno=%d\n", redo, xlrec->bm_bitmap_blkno, xlrec->bm_isOpaque, xlrec->bm_lastword_pos, xlrec->bm_lastword_in_block, xlrec->bm_next_blkno)));
#endif
if (xlrec->bm_isOpaque)
{
if (bitmapPageOpaque->bm_bitmap_next != InvalidBlockNumber)
elog(PANIC,
"%s next bitmap page for blkno %d is already set",
"bm_insert_redo: ",
xlrec->bm_bitmap_blkno);
Assert(bitmapPageOpaque->bm_hrl_words_used ==
BM_NUM_OF_HRL_WORDS_PER_PAGE);
bitmapPageOpaque->bm_bitmap_next = xlrec->bm_next_blkno;
}
else
{
BMBitmap bitmap;
if (bitmapPageOpaque->bm_hrl_words_used !=
xlrec->bm_lastword_pos - 1)
elog(PANIC,
"bm_insert_redo: a bit has been inserted in the pos %d",
xlrec->bm_lastword_pos);
Assert (xlrec->bm_lastword_in_block != 0);
bitmap = (BMBitmap) PageGetContents(bitmapPage);
bitmap->bm_headerWords
[(bitmapPageOpaque->bm_hrl_words_used/BM_HRL_WORD_SIZE)] |=
(1<<(BM_HRL_WORD_SIZE-1-
(bitmapPageOpaque->bm_hrl_words_used%BM_HRL_WORD_SIZE)));
bitmap->bm_contentWords[bitmapPageOpaque->bm_hrl_words_used] =
xlrec->bm_lastword_in_block;
bitmapPageOpaque->bm_hrl_words_used++;
}
PageSetLSN(bitmapPage, lsn);
PageSetTLI(bitmapPage, ThisTimeLineID);
_bitmap_wrtbuf(bitmapBuffer);
}
else
_bitmap_relbuf(bitmapBuffer);
}
else
elog(PANIC, "bm_insert_undo: not implemented.");
}
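Examples 8 and 10 through 12 come from an older bitmap index access method (this style of redo routine predates PostgreSQL 9.0 and survives in Greenplum), but they all share one redo-side idiom: reapply a change only if the page LSN predates the record, then stamp the page with the record's LSN so that replay stays idempotent if recovery is repeated. Distilled to a skeleton:
    /* old-style redo; XLByteLT() is the pre-9.3 LSN comparison macro */
    if (XLByteLT(PageGetLSN(page), lsn))
    {
        /* page predates this record: reapply the logged change ... */
        PageSetLSN(page, lsn);
        PageSetTLI(page, ThisTimeLineID);
        _bitmap_wrtbuf(buffer);      /* write out and release */
    }
    else
        _bitmap_relbuf(buffer);      /* already applied; just release */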
Example 9: _hash_splitbucket
//......... part of the code omitted here .........
END_CRIT_SECTION();
if (nbuf == bucket_nbuf)
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
else
_hash_relbuf(rel, nbuf);
/* be tidy */
for (i = 0; i < nitups; i++)
pfree(itups[i]);
break;
}
/* Else, advance to next old page */
obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
opage = BufferGetPage(obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
}
/*
* We're at the end of the old bucket chain, so we're done partitioning
* the tuples. Mark the old and new buckets to indicate split is
* finished.
*
* To avoid deadlocks due to locking order of buckets, first lock the old
* bucket and then the new bucket.
*/
LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
opage = BufferGetPage(bucket_obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
npage = BufferGetPage(bucket_nbuf);
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
START_CRIT_SECTION();
oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
/*
* After the split is finished, mark the old bucket to indicate that it
* contains deletable tuples. We will clear split-cleanup flag after
* deleting such tuples either at the end of split or at the next split
* from old bucket or at the time of vacuum.
*/
oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
/*
* now write the buffers, here we don't release the locks as caller is
* responsible to release locks.
*/
MarkBufferDirty(bucket_obuf);
MarkBufferDirty(bucket_nbuf);
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
xl_hash_split_complete xlrec;
xlrec.old_bucket_flag = oopaque->hasho_flag;
xlrec.new_bucket_flag = nopaque->hasho_flag;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
PageSetLSN(BufferGetPage(bucket_obuf), recptr);
PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
}
END_CRIT_SECTION();
/*
* If possible, clean up the old bucket. We might not be able to do this
* if someone else has a pin on it, but if not then we can go ahead. This
* isn't absolutely necessary, but it reduces bloat; if we don't do it
* now, VACUUM will do it eventually, but maybe not until new overflow
* pages have been allocated. Note that there's no need to clean up the
* new bucket.
*/
if (IsBufferCleanupOK(bucket_obuf))
{
LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
hashbucketcleanup(rel, obucket, bucket_obuf,
BufferGetBlockNumber(bucket_obuf), NULL,
maxbucket, highmask, lowmask, NULL, NULL, true,
NULL, NULL);
}
else
{
LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
}
}
Example 10: bitmap_xlog_newpage
static void
bitmap_xlog_newpage(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
xl_bm_newpage *xlrec = (xl_bm_newpage*) XLogRecGetData(record);
Relation reln;
Page page;
uint8 info;
/* xl_bm_metapage *xlrecMeta = (xl_bm_metapage*)
((char*)xlrec+sizeof(xl_bm_newpage)); */
info = record->xl_info & ~XLR_INFO_MASK;
ereport(DEBUG1, (errmsg_internal("into --> XLogOpenRelation")));
reln = XLogOpenRelation(xlrec->bm_node);
ereport(DEBUG1, (errmsg_internal("done --> XLogOpenRelation")));
if (!RelationIsValid(reln))
return;
ereport(DEBUG1, (errmsg_internal("crash1")));
if (redo)
{
Buffer buffer;
#ifdef BM_DEBUG
ereport(LOG, (errcode(LOG),
errmsg("call bitmap_xlog_newpage: redo=%d, info=%x\n", redo, info)));
#endif
buffer = XLogReadBuffer(true, reln, xlrec->bm_new_blkno);
if (!BufferIsValid(buffer))
elog(PANIC, "bm_insert_redo: block unfound: %d",
xlrec->bm_new_blkno);
page = BufferGetPage(buffer);
if (XLByteLT(PageGetLSN(page), lsn))
{
Buffer metabuf;
BMMetaPage metapage;
switch (info)
{
case XLOG_BITMAP_INSERT_NEWLOV:
_bitmap_lovpageinit(reln, buffer);
break;
case XLOG_BITMAP_INSERT_NEWLOVMETA:
_bitmap_lovmetapageinit(reln, buffer);
break;
case XLOG_BITMAP_INSERT_NEWBITMAP:
_bitmap_bitmappageinit(reln, buffer);
break;
default:
elog(PANIC, "bitmap_redo: unknown newpage op code %u", info);
}
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
_bitmap_wrtbuf(buffer);
metabuf = XLogReadBuffer(true, reln, BM_METAPAGE);
if (!BufferIsValid(metabuf))
elog(PANIC, "bm_insert_redo: block unfound: %d", BM_METAPAGE);
metapage = (BMMetaPage)BufferGetPage(metabuf);
if (XLByteLT(PageGetLSN(metapage), lsn))
{
PageSetLSN(metapage, lsn);
PageSetTLI(metapage, ThisTimeLineID);
_bitmap_wrtbuf(metabuf);
}
else
_bitmap_relbuf(metabuf);
}
else {
_bitmap_relbuf(buffer);
}
}
else
elog(PANIC, "bm_insert_undo: not implemented.");
/* elog(PANIC, "call completely done for _bitmap_lovmetapageinit from bitmap_xlog_newpage[src/backend/access/bitmap/bitmapxlog.c]", info); */
}
Example 11: bitmap_xlog_insert_lovmeta
static void
bitmap_xlog_insert_lovmeta(bool redo, XLogRecPtr lsn, XLogRecord* record)
{
xl_bm_lovmetapage *xlrec = (xl_bm_lovmetapage*)XLogRecGetData(record);
Relation reln;
reln = XLogOpenRelation(xlrec->bm_node);
/* reln = XLogOpenRelation(redo, RM_BITMAP_ID, xlrec->bm_node);*/
if (!RelationIsValid(reln))
return;
if (redo)
{
Buffer lovMetabuf;
Page lovMetapage;
BMLOVMetaItem copyMetaItems, metaItems;
#ifdef BM_DEBUG
ereport(LOG, (errcode(LOG),
errmsg("call bitmap_xlog_insert_lovmeta: redo=%d\n", redo)));
#endif
lovMetabuf = XLogReadBuffer(false, reln, BM_LOV_STARTPAGE-1);
if (!BufferIsValid(lovMetabuf))
elog(PANIC, "bm_insert_redo: block unfound: %d -- at (%d,%d,%d)",
BM_LOV_STARTPAGE-1, xlrec->bm_node.spcNode,
xlrec->bm_node.dbNode, xlrec->bm_node.relNode);
lovMetapage = BufferGetPage(lovMetabuf);
if (XLByteLT(PageGetLSN(lovMetapage), lsn))
{
#ifdef BM_DEBUG
uint32 attno;
#endif
copyMetaItems = (BMLOVMetaItem)PageGetContents(lovMetapage);
metaItems = (BMLOVMetaItem)
((char*)xlrec + sizeof(xl_bm_lovmetapage));
memcpy(copyMetaItems, metaItems,
xlrec->bm_num_of_attrs * sizeof(BMLOVMetaItemData));
#ifdef BM_DEBUG
for(attno=0; attno<xlrec->bm_num_of_attrs; attno++)
elog(LOG, "metaItems=%d, %d, %d",
copyMetaItems[attno].bm_lov_heapId,
copyMetaItems[attno].bm_lov_indexId,
copyMetaItems[attno].bm_lov_lastpage);
#endif
PageSetLSN(lovMetapage, lsn);
PageSetTLI(lovMetapage, ThisTimeLineID);
_bitmap_wrtbuf(lovMetabuf);
}
else
_bitmap_relbuf(lovMetabuf);
}
else
elog(PANIC, "bm_insert_undo: not implemented.");
}
Example 12: bitmap_xlog_insert_lovitem
static void
bitmap_xlog_insert_lovitem(bool redo, XLogRecPtr lsn, XLogRecord* record)
{
xl_bm_lovitem *xlrec = (xl_bm_lovitem*) XLogRecGetData(record);
Relation reln;
reln = XLogOpenRelation(xlrec->bm_node);
if (!RelationIsValid(reln))
return;
if (redo)
{
Buffer lovBuffer;
Page lovPage;
#ifdef BM_DEBUG
ereport(LOG, (errcode(LOG),
errmsg("call bitmap_xlog_insert_lovitem: redo=%d, blkno=%d\n",
redo, xlrec->bm_lov_blkno)));
#endif
lovBuffer = XLogReadBuffer(false, reln, xlrec->bm_lov_blkno);
if (!BufferIsValid(lovBuffer))
elog(PANIC, "bm_insert_redo: block unfound: %d",
xlrec->bm_lov_blkno);
lovPage = BufferGetPage(lovBuffer);
if (XLByteLT(PageGetLSN(lovPage), lsn))
{
if(xlrec->bm_isNewItem)
{
OffsetNumber newOffset, itemSize;
newOffset = OffsetNumberNext(PageGetMaxOffsetNumber(lovPage));
if (newOffset != xlrec->bm_lov_offset)
elog(PANIC,
"bm_insert_redo: LOV item is not inserted in pos %d(requested %d)",
newOffset, xlrec->bm_lov_offset);
itemSize = sizeof(BMLOVItemData);
if (itemSize > PageGetFreeSpace(lovPage))
elog(PANIC,
"bm_insert_redo: not enough space in LOV page %d",
xlrec->bm_lov_blkno);
if (PageAddItem(lovPage, (Item)&(xlrec->bm_lovItem), itemSize,
newOffset, LP_USED) == InvalidOffsetNumber)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to add LOV item to \"%s\"",
RelationGetRelationName(reln))));
}
else{
BMLOVItem oldLovItem;
oldLovItem = (BMLOVItem)
PageGetItem(lovPage,
PageGetItemId(lovPage, xlrec->bm_lov_offset));
memcpy(oldLovItem, &(xlrec->bm_lovItem), sizeof(BMLOVItemData));
}
PageSetLSN(lovPage, lsn);
PageSetTLI(lovPage, ThisTimeLineID);
_bitmap_wrtbuf(lovBuffer);
}
else {
_bitmap_relbuf(lovBuffer);
}
}
else
elog(PANIC, "bm_insert_undo: not implemented.");
}
Example 13: brin_xlog_insert_update
/*
* Common part of an insert or update. Inserts the new tuple and updates the
* revmap.
*/
static void
brin_xlog_insert_update(XLogReaderState *record,
xl_brin_insert *xlrec)
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
BlockNumber regpgno;
Page page;
XLogRedoAction action;
/*
* If we inserted the first and only tuple on the page, re-initialize the
* page from scratch.
*/
if (XLogRecGetInfo(record) & XLOG_BRIN_INIT_PAGE)
{
buffer = XLogInitBufferForRedo(record, 0);
page = BufferGetPage(buffer);
brin_page_init(page, BRIN_PAGETYPE_REGULAR);
action = BLK_NEEDS_REDO;
}
else
{
action = XLogReadBufferForRedo(record, 0, &buffer);
}
/* need this page's blkno to store in revmap */
regpgno = BufferGetBlockNumber(buffer);
/* insert the index item into the page */
if (action == BLK_NEEDS_REDO)
{
OffsetNumber offnum;
BrinTuple *tuple;
Size tuplen;
tuple = (BrinTuple *) XLogRecGetBlockData(record, 0, &tuplen);
Assert(tuple->bt_blkno == xlrec->heapBlk);
page = (Page) BufferGetPage(buffer);
offnum = xlrec->offnum;
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
elog(PANIC, "brin_xlog_insert_update: invalid max offset number");
offnum = PageAddItem(page, (Item) tuple, tuplen, offnum, true, false);
if (offnum == InvalidOffsetNumber)
elog(PANIC, "brin_xlog_insert_update: failed to add tuple");
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
/* update the revmap */
action = XLogReadBufferForRedo(record, 1, &buffer);
if (action == BLK_NEEDS_REDO)
{
ItemPointerData tid;
ItemPointerSet(&tid, regpgno, xlrec->offnum);
page = (Page) BufferGetPage(buffer);
brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk,
tid);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
/* XXX no FSM updates here ... */
}
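Example 13 is the modern (9.5+) counterpart of those old redo routines: XLogReadBufferForRedo performs the page-LSN comparison that Examples 8 and 10 through 12 wrote by hand, returning BLK_NEEDS_REDO only when the change still has to be reapplied. The generic skeleton, as a sketch (foo_redo_sketch is a hypothetical name):
#include "postgres.h"
#include "access/xlogreader.h"   /* XLogReaderState */
#include "access/xlogutils.h"    /* XLogReadBufferForRedo, BLK_NEEDS_REDO */
#include "storage/bufmgr.h"

static void
foo_redo_sketch(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buffer;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        Page        page = BufferGetPage(buffer);

        /* ... reapply the logged change to the page ... */

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}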
Example 14: do_setval
//......... part of the code omitted here .........
* restore the state of a sequence exactly during data-only restores -
* it is the only way to clear the is_called flag in an existing
* sequence.
*/
static void
do_setval(Oid relid, int64 next, bool iscalled)
{
SeqTable elm;
Relation seqrel;
Buffer buf;
HeapTupleData seqtuple;
Form_pg_sequence seq;
/* open and AccessShareLock sequence */
init_sequence(relid, &elm, &seqrel);
if (pg_class_aclcheck(elm->relid, GetUserId(), ACL_UPDATE) != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for sequence %s",
RelationGetRelationName(seqrel))));
/* read-only transactions may only modify temp sequences */
if (!seqrel->rd_islocaltemp)
PreventCommandIfReadOnly("setval()");
/*
* Forbid this during parallel operation because, to make it work,
* the cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("setval()");
/* lock page's buffer and read tuple */
seq = read_seq_tuple(elm, seqrel, &buf, &seqtuple);
if ((next < seq->min_value) || (next > seq->max_value))
{
char bufv[100],
bufm[100],
bufx[100];
snprintf(bufv, sizeof(bufv), INT64_FORMAT, next);
snprintf(bufm, sizeof(bufm), INT64_FORMAT, seq->min_value);
snprintf(bufx, sizeof(bufx), INT64_FORMAT, seq->max_value);
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("setval: value %s is out of bounds for sequence \"%s\" (%s..%s)",
bufv, RelationGetRelationName(seqrel),
bufm, bufx)));
}
/* Set the currval() state only if iscalled = true */
if (iscalled)
{
elm->last = next; /* last returned number */
elm->last_valid = true;
}
/* In any case, forget any future cached numbers */
elm->cached = elm->last;
/* check the comment above nextval_internal()'s equivalent call. */
if (RelationNeedsWAL(seqrel))
GetTopTransactionId();
/* ready to change the on-disk (or really, in-buffer) tuple */
START_CRIT_SECTION();
seq->last_value = next; /* last fetched number */
seq->is_called = iscalled;
seq->log_cnt = 0;
MarkBufferDirty(buf);
/* XLOG stuff */
if (RelationNeedsWAL(seqrel))
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
Page page = BufferGetPage(buf);
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
xlrec.node = seqrel->rd_node;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqtuple.t_data, seqtuple.t_len);
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG);
PageSetLSN(page, recptr);
}
END_CRIT_SECTION();
UnlockReleaseBuffer(buf);
relation_close(seqrel, NoLock);
}
Example 15: hashbulkdelete
//......... part of the code omitted here .........
if (bucket_opaque->hasho_prevblkno != InvalidBlockNumber &&
bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
{
cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
Assert(cachedmetap != NULL);
}
}
bucket_buf = buf;
hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
cachedmetap->hashm_maxbucket,
cachedmetap->hashm_highmask,
cachedmetap->hashm_lowmask, &tuples_removed,
&num_index_tuples, split_cleanup,
callback, callback_state);
_hash_dropbuf(rel, bucket_buf);
/* Advance to next bucket */
cur_bucket++;
}
if (BufferIsInvalid(metabuf))
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
/* Write-lock metapage and check for split since we started */
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
if (cur_maxbucket != metap->hashm_maxbucket)
{
/* There's been a split, so process the additional bucket(s) */
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
Assert(cachedmetap != NULL);
cur_maxbucket = cachedmetap->hashm_maxbucket;
goto loop_top;
}
/* Okay, we're really done. Update tuple count in metapage. */
START_CRIT_SECTION();
if (orig_maxbucket == metap->hashm_maxbucket &&
orig_ntuples == metap->hashm_ntuples)
{
/*
* No one has split or inserted anything since start of scan, so
* believe our count as gospel.
*/
metap->hashm_ntuples = num_index_tuples;
}
else
{
/*
* Otherwise, our count is untrustworthy since we may have
* double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
if (metap->hashm_ntuples > tuples_removed)
metap->hashm_ntuples -= tuples_removed;
else
metap->hashm_ntuples = 0;
num_index_tuples = metap->hashm_ntuples;
}
MarkBufferDirty(metabuf);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
xl_hash_update_meta_page xlrec;
XLogRecPtr recptr;
xlrec.ntuples = metap->hashm_ntuples;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHashUpdateMetaPage);
XLogRegisterBuffer(0, metabuf, REGBUF_STANDARD);
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE);
PageSetLSN(BufferGetPage(metabuf), recptr);
}
END_CRIT_SECTION();
_hash_relbuf(rel, metabuf);
/* return statistics */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats->estimated_count = false;
stats->num_index_tuples = num_index_tuples;
stats->tuples_removed += tuples_removed;
/* hashvacuumcleanup will fill in num_pages */
return stats;
}