This article collects typical usage examples of the C++ function PageGetFreeSpace. If you are struggling with questions such as what exactly PageGetFreeSpace does, how to call it, or what real-world uses of it look like, the hand-picked code examples here may help.
A total of 15 code examples of the PageGetFreeSpace function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
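Before the examples, here is a minimal sketch of the pattern most of them share: compare the result of PageGetFreeSpace() against the space a new item would need before actually calling PageAddItem(). The helper name item_would_fit is hypothetical; only the bufpage.h routines and macros are real PostgreSQL APIs, and the extra sizeof(ItemIdData) term mirrors the conservative accounting several examples below use.

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

/*
 * Hypothetical helper (a sketch, not PostgreSQL code): would an item of
 * 'itemsz' bytes fit on 'page'?  PageGetFreeSpace() already subtracts the
 * space for one new line pointer, but adding sizeof(ItemIdData) explicitly,
 * as many callers do, keeps the check conservative.
 */
static bool
item_would_fit(Page page, Size itemsz)
{
    return PageGetFreeSpace(page) >= MAXALIGN(itemsz) + sizeof(ItemIdData);
}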
Example 1: entryIsEnoughSpace
static bool
entryIsEnoughSpace(GinBtree btree, Buffer buf, OffsetNumber off,
                   GinBtreeEntryInsertData *insertData)
{
    Size        releasedsz = 0;
    Size        addedsz;
    Page        page = BufferGetPage(buf);

    Assert(insertData->entry);
    Assert(!GinPageIsData(page));

    if (insertData->isDelete)
    {
        IndexTuple  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));

        releasedsz = MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
    }

    addedsz = MAXALIGN(IndexTupleSize(insertData->entry)) + sizeof(ItemIdData);

    if (PageGetFreeSpace(page) + releasedsz >= addedsz)
        return true;

    return false;
}
Example 2: terminate_brin_buildstate
/*
 * Release resources associated with a BrinBuildState.
 */
static void
terminate_brin_buildstate(BrinBuildState *state)
{
    /*
     * Release the last index buffer used. We might as well ensure that
     * whatever free space remains in that page is available in FSM, too.
     */
    if (!BufferIsInvalid(state->bs_currentInsertBuf))
    {
        Page        page;
        Size        freespace;
        BlockNumber blk;

        page = BufferGetPage(state->bs_currentInsertBuf);
        freespace = PageGetFreeSpace(page);
        blk = BufferGetBlockNumber(state->bs_currentInsertBuf);
        ReleaseBuffer(state->bs_currentInsertBuf);
        RecordPageWithFreeSpace(state->bs_irel, blk, freespace);
        FreeSpaceMapVacuumRange(state->bs_irel, blk, blk + 1);
    }

    brin_free_desc(state->bs_bdesc);
    pfree(state->bs_dtuple);
    pfree(state);
}
Example 3: br_page_get_freespace
/*
 * Return the amount of free space on a regular BRIN index page.
 *
 * If the page is not a regular page, or has been marked with the
 * BRIN_EVACUATE_PAGE flag, returns 0.
 */
static Size
br_page_get_freespace(Page page)
{
    if (!BRIN_IS_REGULAR_PAGE(page) ||
        (BrinPageFlags(page) & BRIN_EVACUATE_PAGE) != 0)
        return 0;
    else
        return PageGetFreeSpace(page);
}
Example 4: gistnospace
/*
 * Check space for itup vector on page
 */
static int
gistnospace(Page page, IndexTuple *itvec, int len)
{
    unsigned int size = 0;
    int         i;

    for (i = 0; i < len; i++)
        size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);

    return (PageGetFreeSpace(page) < size);
}
Example 5: br_page_get_freespace
/*
 * Return the amount of free space on a regular BRIN index page.
 *
 * If the page is not a regular page, or has been marked with the
 * BRIN_EVACUATE_PAGE flag, returns 0.
 */
static Size
br_page_get_freespace(Page page)
{
    BrinSpecialSpace *special;

    special = (BrinSpecialSpace *) PageGetSpecialPointer(page);

    if (!BRIN_IS_REGULAR_PAGE(page) ||
        (special->flags & BRIN_EVACUATE_PAGE) != 0)
        return 0;
    else
        return PageGetFreeSpace(page);
}
Example 6: PageGetHeapFreeSpace
/*
 * PageGetHeapFreeSpace
 *    Returns the size of the free (allocatable) space on a page,
 *    reduced by the space needed for a new line pointer.
 *
 * The difference between this and PageGetFreeSpace is that this will return
 * zero if there are already MaxHeapTuplesPerPage line pointers in the page
 * and none are free. We use this to enforce that no more than
 * MaxHeapTuplesPerPage line pointers are created on a heap page. (Although
 * no more tuples than that could fit anyway, in the presence of redirected
 * or dead line pointers it'd be possible to have too many line pointers.
 * To avoid breaking code that assumes MaxHeapTuplesPerPage is a hard limit
 * on the number of line pointers, we make this extra check.)
 */
Size
PageGetHeapFreeSpace(Page page)
{
    Size        space;

    space = PageGetFreeSpace(page);
    if (space > 0)
    {
        OffsetNumber offnum,
                    nline;

        /*
         * Are there already MaxHeapTuplesPerPage line pointers in the page?
         */
        nline = PageGetMaxOffsetNumber(page);
        if (nline >= MaxHeapTuplesPerPage)
        {
            if (PageHasFreeLinePointers((PageHeader) page))
            {
                /*
                 * Since this is just a hint, we must confirm that there is
                 * indeed a free line pointer
                 */
                for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum))
                {
                    ItemId      lp = PageGetItemId(page, offnum);

                    if (!ItemIdIsUsed(lp))
                        break;
                }

                if (offnum > nline)
                {
                    /*
                     * The hint is wrong, but we can't clear it here since we
                     * don't have the ability to mark the page dirty.
                     */
                    space = 0;
                }
            }
            else
            {
                /*
                 * Although the hint might be wrong, PageAddItem will believe
                 * it anyway, so we must believe it too.
                 */
                space = 0;
            }
        }
    }
    return space;
}
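As a rough illustration of how callers consult this routine (a hedged sketch; the function name page_has_room_for_tuple and the explicit saveFreeSpace parameter are assumptions for illustration, not the actual hio.c code), a heap-insertion path typically asks PageGetHeapFreeSpace() rather than PageGetFreeSpace(), so that a page whose MaxHeapTuplesPerPage line pointers are exhausted reports no usable room:

#include "postgres.h"
#include "storage/bufpage.h"

/*
 * Hypothetical caller sketch: does a heap tuple of 'len' bytes (already
 * MAXALIGNed) fit on 'page' while still leaving 'saveFreeSpace' bytes
 * reserved, e.g. for the table's fillfactor?
 */
static bool
page_has_room_for_tuple(Page page, Size len, Size saveFreeSpace)
{
    return len + saveFreeSpace <= PageGetHeapFreeSpace(page);
}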
Example 7: lazy_vacuum_heap
/*
 * lazy_vacuum_heap() -- second pass over the heap
 *
 * This routine marks dead tuples as unused and compacts out free
 * space on their pages. Pages not having dead tuples recorded from
 * lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
    MIRROREDLOCK_BUFMGR_DECLARE;

    int         tupindex;
    int         npages;
    PGRUsage    ru0;

    pg_rusage_init(&ru0);
    npages = 0;
    tupindex = 0;

    /* Fetch gp_persistent_relation_node information that will be added to XLOG record. */
    RelationFetchGpRelationNodeForXLog(onerel);

    while (tupindex < vacrelstats->num_dead_tuples)
    {
        BlockNumber tblk;
        Buffer      buf;
        Page        page;

        vacuum_delay_point();

        tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);

        /* -------- MirroredLock ---------- */
        MIRROREDLOCK_BUFMGR_LOCK;

        buf = ReadBuffer(onerel, tblk);
        LockBufferForCleanup(buf);
        tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);

        /* Now that we've compacted the page, record its available space */
        page = BufferGetPage(buf);
        lazy_record_free_space(vacrelstats, tblk,
                               PageGetFreeSpace(page));
        UnlockReleaseBuffer(buf);

        MIRROREDLOCK_BUFMGR_UNLOCK;
        /* -------- MirroredLock ---------- */

        npages++;
    }

    ereport(elevel,
            (errmsg("\"%s\": removed %d row versions in %d pages",
                    RelationGetRelationName(onerel),
                    tupindex, npages),
             errdetail("%s.",
                       pg_rusage_show(&ru0))));
}
Example 8: terminate_brin_buildstate
/*
 * Release resources associated with a BrinBuildState.
 */
static void
terminate_brin_buildstate(BrinBuildState *state)
{
    /* release the last index buffer used */
    if (!BufferIsInvalid(state->bs_currentInsertBuf))
    {
        Page        page;

        page = BufferGetPage(state->bs_currentInsertBuf);
        RecordPageWithFreeSpace(state->bs_irel,
                                BufferGetBlockNumber(state->bs_currentInsertBuf),
                                PageGetFreeSpace(page));
        ReleaseBuffer(state->bs_currentInsertBuf);
    }

    brin_free_desc(state->bs_bdesc);
    pfree(state->bs_dtuple);
    pfree(state);
}
Example 9: gistnospace
/*
 * Check space for itup vector on page
 */
bool
gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size freespace)
{
    unsigned int size = freespace,
                deleted = 0;
    int         i;

    for (i = 0; i < len; i++)
        size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);

    if (todelete != InvalidOffsetNumber)
    {
        IndexTuple  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, todelete));

        deleted = IndexTupleSize(itup) + sizeof(ItemIdData);
    }

    return (PageGetFreeSpace(page) + deleted < size);
}
Example 10: gist_dumptree
static void
gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
{
    Buffer      buffer;
    Page        page;
    GISTPageOpaque opaque;
    IndexTuple  which;
    ItemId      iid;
    OffsetNumber i,
                maxoff;
    BlockNumber cblk;
    char       *pred;

    pred = (char *) palloc(sizeof(char) * level + 1);
    MemSet(pred, '\t', level);
    pred[level] = '\0';

    buffer = ReadBuffer(r, blk);
    page = (Page) BufferGetPage(buffer);
    opaque = (GISTPageOpaque) PageGetSpecialPointer(page);

    maxoff = PageGetMaxOffsetNumber(page);

    elog(DEBUG4, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred,
         coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk,
         (int) maxoff, PageGetFreeSpace(page));

    for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
    {
        iid = PageGetItemId(page, i);
        which = (IndexTuple) PageGetItem(page, iid);
        cblk = ItemPointerGetBlockNumber(&(which->t_tid));
#ifdef PRINTTUPLE
        elog(DEBUG4, "%s Tuple. blk: %d size: %d", pred, (int) cblk,
             IndexTupleSize(which));
#endif

        if (!(opaque->flags & F_LEAF))
            gist_dumptree(r, level + 1, cblk, i);
    }
    ReleaseBuffer(buffer);
    pfree(pred);
}
Example 11: entryIsEnoughSpace
static bool
entryIsEnoughSpace(GinBtree btree, Buffer buf, OffsetNumber off)
{
    Size        itupsz = 0;
    Page        page = BufferGetPage(buf);

    Assert(btree->entry);
    Assert(!GinPageIsData(page));

    if (btree->isDelete)
    {
        IndexTuple  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));

        itupsz = MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
    }

    if (PageGetFreeSpace(page) + itupsz >= MAXALIGN(IndexTupleSize(btree->entry)) + sizeof(ItemIdData))
        return true;

    return false;
}
Example 12: pgstat_index_page
/*
 * pgstat_index_page -- for generic index page
 */
static void
pgstat_index_page(pgstattuple_type *stat, Page page,
                  OffsetNumber minoff, OffsetNumber maxoff)
{
    OffsetNumber i;

    stat->free_space += PageGetFreeSpace(page);

    for (i = minoff; i <= maxoff; i = OffsetNumberNext(i))
    {
        ItemId      itemid = PageGetItemId(page, i);

        if (ItemIdIsDead(itemid))
        {
            stat->dead_tuple_count++;
            stat->dead_tuple_len += ItemIdGetLength(itemid);
        }
        else
        {
            stat->tuple_count++;
            stat->tuple_len += ItemIdGetLength(itemid);
        }
    }
}
Example 13: pgstattuple_real
/*
 * pgstattuple_real
 *
 * The real work occurs here
 */
static Datum
pgstattuple_real(Relation rel, FunctionCallInfo fcinfo)
{
    HeapScanDesc scan;
    HeapTuple   tuple;
    BlockNumber nblocks;
    BlockNumber block = 0;      /* next block to count free space in */
    BlockNumber tupblock;
    Buffer      buffer;
    uint64      table_len;
    uint64      tuple_len = 0;
    uint64      dead_tuple_len = 0;
    uint64      tuple_count = 0;
    uint64      dead_tuple_count = 0;
    double      tuple_percent;
    double      dead_tuple_percent;
    uint64      free_space = 0; /* free/reusable space in bytes */
    double      free_percent;   /* free/reusable space in % */
    TupleDesc   tupdesc;
    AttInMetadata *attinmeta;
    char      **values;
    int         i;
    Datum       result;

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    /* make sure we have a persistent copy of the tupdesc */
    tupdesc = CreateTupleDescCopy(tupdesc);

    /*
     * Generate attribute metadata needed later to produce tuples from raw C
     * strings
     */
    attinmeta = TupleDescGetAttInMetadata(tupdesc);

    scan = heap_beginscan(rel, SnapshotAny, 0, NULL);

    nblocks = scan->rs_nblocks; /* # blocks to be scanned */

    /* scan the relation */
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* must hold a buffer lock to call HeapTupleSatisfiesNow */
        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

        if (HeapTupleSatisfiesNow(tuple->t_data, scan->rs_cbuf))
        {
            tuple_len += tuple->t_len;
            tuple_count++;
        }
        else
        {
            dead_tuple_len += tuple->t_len;
            dead_tuple_count++;
        }

        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

        /*
         * To avoid physically reading the table twice, try to do the
         * free-space scan in parallel with the heap scan. However,
         * heap_getnext may find no tuples on a given page, so we cannot
         * simply examine the pages returned by the heap scan.
         */
        tupblock = BlockIdGetBlockNumber(&tuple->t_self.ip_blkid);

        while (block <= tupblock)
        {
            buffer = ReadBuffer(rel, block);
            LockBuffer(buffer, BUFFER_LOCK_SHARE);
            free_space += PageGetFreeSpace((Page) BufferGetPage(buffer));
            LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
            ReleaseBuffer(buffer);
            block++;
        }
    }
    heap_endscan(scan);

    while (block < nblocks)
    {
        buffer = ReadBuffer(rel, block);
        free_space += PageGetFreeSpace((Page) BufferGetPage(buffer));
        ReleaseBuffer(buffer);
        block++;
    }

    heap_close(rel, AccessShareLock);

    table_len = (uint64) nblocks * BLCKSZ;

    if (nblocks == 0)
    {
        tuple_percent = 0.0;
//......... the rest of the code is omitted here .........
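The remainder of this example is truncated in the source. Purely as an illustration of what this free-space accounting typically feeds into (an assumption-laden sketch, not a reconstruction of the omitted code), the collected byte counts are usually converted into percentages of the total table size along these lines:

/*
 * Hypothetical sketch: derive percentages from the totals gathered above.
 * table_len can be zero for an empty relation, so guard the division.
 */
if (table_len == 0)
{
    tuple_percent = 0.0;
    dead_tuple_percent = 0.0;
    free_percent = 0.0;
}
else
{
    tuple_percent = 100.0 * tuple_len / table_len;
    dead_tuple_percent = 100.0 * dead_tuple_len / table_len;
    free_percent = 100.0 * free_space / table_len;
}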
Example 14: _hash_doinsert
/*
 * _hash_doinsert() -- Handle insertion of a single HashItem in the table.
 *
 * This routine is called by the public interface routines, hashbuild
 * and hashinsert. By here, hashitem is completely filled in.
 * The datum to be used as a "key" is in the hashitem.
 */
InsertIndexResult
_hash_doinsert(Relation rel, HashItem hitem)
{
    Buffer      buf;
    Buffer      metabuf;
    HashMetaPage metap;
    IndexTuple  itup;
    BlockNumber itup_blkno;
    OffsetNumber itup_off;
    InsertIndexResult res;
    BlockNumber blkno;
    Page        page;
    HashPageOpaque pageopaque;
    Size        itemsz;
    bool        do_expand;
    uint32      hashkey;
    Bucket      bucket;
    Datum       datum;
    bool        isnull;

    /*
     * Compute the hash key for the item. We do this first so as not to
     * need to hold any locks while running the hash function.
     */
    itup = &(hitem->hash_itup);
    if (rel->rd_rel->relnatts != 1)
        elog(ERROR, "hash indexes support only one index key");
    datum = index_getattr(itup, 1, RelationGetDescr(rel), &isnull);
    Assert(!isnull);
    hashkey = _hash_datum2hashkey(rel, datum);

    /* compute item size too */
    itemsz = IndexTupleDSize(hitem->hash_itup)
        + (sizeof(HashItemData) - sizeof(IndexTupleData));

    itemsz = MAXALIGN(itemsz);  /* be safe, PageAddItem will do this but
                                 * we need to be consistent */

    /*
     * Acquire shared split lock so we can compute the target bucket
     * safely (see README).
     */
    _hash_getlock(rel, 0, HASH_SHARE);

    /* Read the metapage */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
    metap = (HashMetaPage) BufferGetPage(metabuf);
    _hash_checkpage(rel, (Page) metap, LH_META_PAGE);

    /*
     * Check whether the item can fit on a hash page at all. (Eventually,
     * we ought to try to apply TOAST methods if not.) Note that at this
     * point, itemsz doesn't include the ItemId.
     */
    if (itemsz > HashMaxItemSize((Page) metap))
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("index row size %lu exceeds hash maximum %lu",
                        (unsigned long) itemsz,
                        (unsigned long) HashMaxItemSize((Page) metap))));

    /*
     * Compute the target bucket number, and convert to block number.
     */
    bucket = _hash_hashkey2bucket(hashkey,
                                  metap->hashm_maxbucket,
                                  metap->hashm_highmask,
                                  metap->hashm_lowmask);

    blkno = BUCKET_TO_BLKNO(metap, bucket);

    /* release lock on metapage, but keep pin since we'll need it again */
    _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

    /*
     * Acquire share lock on target bucket; then we can release split lock.
     */
    _hash_getlock(rel, blkno, HASH_SHARE);
    _hash_droplock(rel, 0, HASH_SHARE);

    /* Fetch the primary bucket page for the bucket */
    buf = _hash_getbuf(rel, blkno, HASH_WRITE);
    page = BufferGetPage(buf);
    _hash_checkpage(rel, page, LH_BUCKET_PAGE);
    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
    Assert(pageopaque->hasho_bucket == bucket);

    /* Do the insertion */
    while (PageGetFreeSpace(page) < itemsz)
    {
        /*
         * no space on this page; check for an overflow page
//......... the rest of the code is omitted here .........
Example 15: _hash_splitbucket
//......... part of the code is omitted here .........
        if (ooffnum > omaxoffnum)
        {
            /* at end of page, but check for an(other) overflow page */
            oblkno = oopaque->hasho_nextblkno;
            if (!BlockNumberIsValid(oblkno))
                break;

            /*
             * we ran out of tuples on this particular page, but we
             * have more overflow pages; advance to next page.
             */
            _hash_wrtbuf(rel, obuf);

            obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
            opage = BufferGetPage(obuf);
            _hash_checkpage(rel, opage, LH_OVERFLOW_PAGE);
            oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
            ooffnum = FirstOffsetNumber;
            omaxoffnum = PageGetMaxOffsetNumber(opage);
            continue;
        }

        /*
         * Re-hash the tuple to determine which bucket it now belongs in.
         *
         * It is annoying to call the hash function while holding locks,
         * but releasing and relocking the page for each tuple is unappealing
         * too.
         */
        hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
        itup = &(hitem->hash_itup);
        datum = index_getattr(itup, 1, itupdesc, &null);
        Assert(!null);

        bucket = _hash_hashkey2bucket(_hash_datum2hashkey(rel, datum),
                                      maxbucket, highmask, lowmask);

        if (bucket == nbucket)
        {
            /*
             * insert the tuple into the new bucket. if it doesn't fit on
             * the current page in the new bucket, we must allocate a new
             * overflow page and place the tuple on that page instead.
             */
            itemsz = IndexTupleDSize(hitem->hash_itup)
                + (sizeof(HashItemData) - sizeof(IndexTupleData));

            itemsz = MAXALIGN(itemsz);

            if (PageGetFreeSpace(npage) < itemsz)
            {
                /* write out nbuf and drop lock, but keep pin */
                _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
                /* chain to a new overflow page */
                nbuf = _hash_addovflpage(rel, metabuf, nbuf);
                npage = BufferGetPage(nbuf);
                _hash_checkpage(rel, npage, LH_OVERFLOW_PAGE);
                /* we don't need nopaque within the loop */
            }

            noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
            if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
                == InvalidOffsetNumber)
                elog(ERROR, "failed to add index item to \"%s\"",
                     RelationGetRelationName(rel));

            /*
             * now delete the tuple from the old bucket. after this
             * section of code, 'ooffnum' will actually point to the
             * ItemId to which we would point if we had advanced it before
             * the deletion (PageIndexTupleDelete repacks the ItemId
             * array). this also means that 'omaxoffnum' is exactly one
             * less than it used to be, so we really can just decrement it
             * instead of calling PageGetMaxOffsetNumber.
             */
            PageIndexTupleDelete(opage, ooffnum);
            omaxoffnum = OffsetNumberPrev(omaxoffnum);
        }
        else
        {
            /*
             * the tuple stays on this page. we didn't move anything, so
             * we didn't delete anything and therefore we don't have to
             * change 'omaxoffnum'.
             */
            Assert(bucket == obucket);
            ooffnum = OffsetNumberNext(ooffnum);
        }
    }

    /*
     * We're at the end of the old bucket chain, so we're done partitioning
     * the tuples. Before quitting, call _hash_squeezebucket to ensure the
     * tuples remaining in the old bucket (including the overflow pages) are
     * packed as tightly as possible. The new bucket is already tight.
     */
    _hash_wrtbuf(rel, obuf);
    _hash_wrtbuf(rel, nbuf);

    _hash_squeezebucket(rel, obucket, start_oblkno);
}