本文整理汇总了C语言中ItemPointerIsValid函数的典型用法代码示例(这些函数取自PostgreSQL的C源码,并非C++)。如果您正苦于以下问题:C语言 ItemPointerIsValid函数的具体用法?ItemPointerIsValid怎么用?ItemPointerIsValid使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ItemPointerIsValid函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C语言代码示例。
示例1: PLy_procedure_argument_valid
/*
 * Check whether our cached composite-type information for one PL/Python
 * procedure argument is still up to date.
 *
 * Returns true if the cached entry may be used as-is; false if the
 * underlying pg_class tuple has changed and the entry must be rebuilt.
 */
static bool
PLy_procedure_argument_valid(PLyTypeInfo *arg)
{
	HeapTuple	classTup;
	bool		still_valid;

	/* Only composite (row) types carry cacheable metadata */
	if (arg->is_rowtype != 1)
		return true;

	/*
	 * A zero typ_relid means we were called on an output argument of a
	 * function returning an unnamed record type; that info cannot change.
	 */
	if (!OidIsValid(arg->typ_relid))
		return true;

	/* Otherwise we must have recorded the defining tuple's identity */
	Assert(TransactionIdIsValid(arg->typrel_xmin));
	Assert(ItemPointerIsValid(&arg->typrel_tid));

	/* Fetch the current pg_class tuple for the composite type */
	classTup = SearchSysCache1(RELOID, ObjectIdGetDatum(arg->typ_relid));
	if (!HeapTupleIsValid(classTup))
		elog(ERROR, "cache lookup failed for relation %u", arg->typ_relid);

	/* The cache is valid only if the tuple's identity is unchanged */
	still_valid = (arg->typrel_xmin == HeapTupleHeaderGetRawXmin(classTup->t_data) &&
				   ItemPointerEquals(&arg->typrel_tid, &classTup->t_self));

	ReleaseSysCache(classTup);

	return still_valid;
}
示例2: gistgettuple
/*
 * gistgettuple() -- Get the next tuple in the scan
 *
 * fmgr-callable: argument 0 is the IndexScanDesc, argument 1 the
 * ScanDirection.  Returns a bool Datum: true iff a matching tuple
 * was found (its TID is delivered through the scan descriptor).
 */
Datum
gistgettuple(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
	GISTScanOpaque so;
	ItemPointerData tid;
	bool		res;

	so = (GISTScanOpaque) scan->opaque;

	/*
	 * If we have produced an index tuple in the past and the executor has
	 * informed us we need to mark it as "killed", do so now.
	 */
	if (scan->kill_prior_tuple && ItemPointerIsValid(&(so->curpos)))
		killtuple(scan->indexRelation, so, &(so->curpos));

	/*
	 * Get the next tuple that matches the search key.  If asked to skip
	 * killed tuples, gistnext continues looping until it finds a non-killed
	 * tuple that matches the search key.
	 *
	 * gistnext's result only needs to be tested for zero/nonzero here; the
	 * original "? true : false" ternary was redundant.
	 */
	res = (gistnext(scan, dir, &tid, 1, scan->ignore_killed_tuples) != 0);

	PG_RETURN_BOOL(res);
}
示例3: GinDataPageAddItemPointer
/*
 * add ItemPointer to a leaf page.
 *
 * Inserts *data at position 'offset' in the page's item-pointer array,
 * shifting later entries one slot to the right.  InvalidOffsetNumber
 * means "append after the current last entry".  The caller is expected
 * to have verified that the page has room for one more entry.
 */
void
GinDataPageAddItemPointer(Page page, ItemPointer data, OffsetNumber offset)
{
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
char *ptr;
Assert(ItemPointerIsValid(data));
Assert(GinPageIsLeaf(page));
if (offset == InvalidOffsetNumber)
{
/* append: write just past the current last item */
ptr = (char *) GinDataPageGetItemPointer(page, maxoff + 1);
}
else
{
ptr = (char *) GinDataPageGetItemPointer(page, offset);
/*
 * Shift entries offset..maxoff (that's maxoff - offset + 1 of them)
 * right by one; memmove because source and destination overlap.
 */
if (maxoff + 1 - offset != 0)
memmove(ptr + sizeof(ItemPointerData),
ptr,
(maxoff - offset + 1) * sizeof(ItemPointerData));
}
memcpy(ptr, data, sizeof(ItemPointerData));
GinPageGetOpaque(page)->maxoff++;
}
示例4: ginInsertBAEntries
/*
 * Insert the entries for one heap pointer.
 *
 * Since the entries are being inserted into a balanced binary tree, you
 * might think that the order of insertion wouldn't be critical, but it turns
 * out that inserting the entries in sorted order results in a lot of
 * rebalancing operations and is slow. To prevent this, we attempt to insert
 * the nodes in an order that will produce a nearly-balanced tree if the input
 * is in fact sorted.
 *
 * We do this as follows. First, we imagine that we have an array whose size
 * is the smallest power of two greater than or equal to the actual array
 * size. Second, we insert the middle entry of our virtual array into the
 * tree; then, we insert the middles of each half of our virtual array, then
 * middles of quarters, etc.
 */
void
ginInsertBAEntries(BuildAccumulator *accum,
ItemPointer heapptr, OffsetNumber attnum,
Datum *entries, GinNullCategory *categories,
int32 nentries)
{
uint32 step = nentries;
/* nothing to insert for an empty entry list */
if (nentries <= 0)
return;
Assert(ItemPointerIsValid(heapptr) && attnum >= FirstOffsetNumber);
/*
 * step will contain largest power of 2 and <= nentries
 */
/*
 * Classic bit-smearing trick: OR each right-shift so that every bit below
 * the highest set bit becomes 1 (giving 2^k - 1), then halve and add one
 * to land exactly on the largest power of two <= nentries.
 */
step |= (step >> 1);
step |= (step >> 2);
step |= (step >> 4);
step |= (step >> 8);
step |= (step >> 16);
step >>= 1;
step++;
while (step > 0)
{
int i;
/*
 * At stride 'step', visit entries step-1, 3*step-1, 5*step-1, ... -
 * the midpoints of successively finer halves of the virtual array.
 * The i >= 0 test guards against wraparound of the signed increment.
 */
for (i = step - 1; i < nentries && i >= 0; i += step << 1 /* *2 */ )
ginInsertBAEntry(accum, heapptr, attnum,
entries[i], categories[i]);
step >>= 1; /* /2 */
}
}
示例5: adjustiptr
/*
 * adjustiptr() -- adjust current and marked item pointers in the scan
 *
 * Depending on the type of update and the place it happened, we
 * need to do nothing, to back up one record, or to start over on
 * the same page.
 *
 * s      - the index scan whose position may need fixing
 * iptr   - the item pointer to adjust (the scan's current or marked one)
 * op     - kind of page change (RTOP_DEL or RTOP_SPLIT)
 * blkno  - block on which the change happened
 * offnum - offset at which the change happened (for deletions)
 */
static void
adjustiptr(IndexScanDesc s,
ItemPointer iptr,
int op,
BlockNumber blkno,
OffsetNumber offnum)
{
OffsetNumber curoff;
RTreeScanOpaque so;
/* an invalid pointer means the scan isn't positioned; nothing to fix */
if (ItemPointerIsValid(iptr))
{
/* only changes on the page we are positioned on can affect us */
if (ItemPointerGetBlockNumber(iptr) == blkno)
{
curoff = ItemPointerGetOffsetNumber(iptr);
so = (RTreeScanOpaque) s->opaque;
switch (op)
{
case RTOP_DEL:
/* back up one if we need to */
if (curoff >= offnum)
{
if (curoff > FirstOffsetNumber)
{
/* just adjust the item pointer */
ItemPointerSet(iptr, blkno, OffsetNumberPrev(curoff));
}
else
{
/*
 * remember that we're before the current
 * tuple
 */
ItemPointerSet(iptr, blkno, FirstOffsetNumber);
/* flag which of the two scan pointers is "before" its tuple */
if (iptr == &(s->currentItemData))
so->s_flags |= RTS_CURBEFORE;
else
so->s_flags |= RTS_MRKBEFORE;
}
}
break;
case RTOP_SPLIT:
/* back to start of page on split */
ItemPointerSet(iptr, blkno, FirstOffsetNumber);
/* a restart from page start clears any "before" flag */
if (iptr == &(s->currentItemData))
so->s_flags &= ~RTS_CURBEFORE;
else
so->s_flags &= ~RTS_MRKBEFORE;
break;
default:
elog(ERROR, "unrecognized operation in rtree scan adjust: %d", op);
}
}
}
}
示例6: HeapTupleSatisfiesToast
/*
 * HeapTupleSatisfiesToast
 * True iff heap tuple is valid as a TOAST row.
 *
 * This is a simplified version that only checks for VACUUM moving conditions.
 * It's appropriate for TOAST usage because TOAST really doesn't want to do
 * its own time qual checks; if you can see the main table row that contains
 * a TOAST reference, you should be able to see the TOASTed value. However,
 * vacuuming a TOAST table is independent of the main table, and in case such
 * a vacuum fails partway through, we'd better do this much checking.
 *
 * Among other things, this means you can't do UPDATEs of rows in a TOAST
 * table.
 */
bool
HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
/* if xmin is already hinted committed, the tuple is trivially visible */
if (!HeapTupleHeaderXminCommitted(tuple))
{
if (HeapTupleHeaderXminInvalid(tuple))
return false;
/* Used by pre-9.0 binary upgrades */
if (tuple->t_infomask & HEAP_MOVED_OFF)
{
TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
/* moved off by a VACUUM FULL of our own xact: treat as gone */
if (TransactionIdIsCurrentTransactionId(xvac))
return false;
if (!TransactionIdIsInProgress(xvac))
{
if (TransactionIdDidCommit(xvac))
{
/* the move committed, so this copy is dead; hint it invalid */
SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
InvalidTransactionId);
return false;
}
/* the move aborted: this copy stays; hint xmin committed */
SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
InvalidTransactionId);
}
}
/* Used by pre-9.0 binary upgrades */
else if (tuple->t_infomask & HEAP_MOVED_IN)
{
TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
/* moved in by our own xact: visible, nothing more to check */
if (!TransactionIdIsCurrentTransactionId(xvac))
{
if (TransactionIdIsInProgress(xvac))
return false;
if (TransactionIdDidCommit(xvac))
/* the move committed, so this copy is live; hint it committed */
SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
InvalidTransactionId);
else
{
/* the move aborted: this copy never became valid */
SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
InvalidTransactionId);
return false;
}
}
}
}
/* otherwise assume the tuple is valid for TOAST. */
return true;
}
示例7: uint64_to_itemptr
/*
 * Reconstruct an ItemPointer from its packed 64-bit form, inverting
 * itemptr_to_uint64: the low MaxHeapTuplesPerPageBits bits hold the
 * offset number, the remaining high bits hold the block number.
 */
static inline void
uint64_to_itemptr(uint64 val, ItemPointer iptr)
{
	uint64		offbits = val & ((1 << MaxHeapTuplesPerPageBits) - 1);

	GinItemPointerSetOffsetNumber(iptr, offbits);
	GinItemPointerSetBlockNumber(iptr, val >> MaxHeapTuplesPerPageBits);
	Assert(ItemPointerIsValid(iptr));
}
示例8: processPendingPage
/*
 * Collect data from a pending-list page in preparation for insertion into
 * the main index.
 *
 * Go through all tuples >= startoff on page and collect values in accum
 *
 * Note that ka is just workspace --- it does not carry any state across
 * calls.
 */
static void
processPendingPage(BuildAccumulator *accum, KeyArray *ka,
Page page, OffsetNumber startoff)
{
ItemPointerData heapptr;
OffsetNumber i,
maxoff;
OffsetNumber attrnum;
/* reset *ka to empty */
ka->nvalues = 0;
maxoff = PageGetMaxOffsetNumber(page);
Assert(maxoff >= FirstOffsetNumber);
/* invalid heapptr marks "no group started yet" for the first iteration */
ItemPointerSetInvalid(&heapptr);
attrnum = 0;
for (i = startoff; i <= maxoff; i = OffsetNumberNext(i))
{
IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
OffsetNumber curattnum;
Datum curkey;
GinNullCategory curcategory;
/* Check for change of heap TID or attnum */
curattnum = gintuple_get_attrnum(accum->ginstate, itup);
if (!ItemPointerIsValid(&heapptr))
{
/* first tuple: start the first (TID, attnum) group */
heapptr = itup->t_tid;
attrnum = curattnum;
}
else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) &&
curattnum == attrnum))
{
/*
 * ginInsertBAEntries can insert several datums per call, but only
 * for one heap tuple and one column. So call it at a boundary,
 * and reset ka.
 */
ginInsertBAEntries(accum, &heapptr, attrnum,
ka->keys, ka->categories, ka->nvalues);
ka->nvalues = 0;
heapptr = itup->t_tid;
attrnum = curattnum;
}
/* Add key to KeyArray */
curkey = gintuple_get_key(accum->ginstate, itup, &curcategory);
addDatum(ka, curkey, curcategory);
}
/* Dump out all remaining keys */
ginInsertBAEntries(accum, &heapptr, attrnum,
ka->keys, ka->categories, ka->nvalues);
}
示例9: PersistentStore_AddTuple
/*
 * Add one tuple to a persistent store table.
 *
 * Allocates the next serial number from the shared-memory counter,
 * stamps it into 'values', inserts the tuple, and bumps the in-use
 * count.  On return *persistentTid and *persistentSerialNum identify
 * the stored tuple.
 */
void PersistentStore_AddTuple(
PersistentStoreData *storeData,
PersistentStoreSharedData *storeSharedData,
Datum *values,
bool flushToXLog,
/* When true, the XLOG record for this change will be flushed to disk. */
ItemPointer persistentTid,
/* TID of the stored tuple. */
int64 *persistentSerialNum)
{
#ifdef USE_ASSERT_CHECKING
if (storeSharedData == NULL ||
!PersistentStoreSharedData_EyecatcherIsValid(storeSharedData))
elog(ERROR, "Persistent store shared-memory not valid");
#endif
PTCheck_BeforeAddingEntry(storeData, values);
if (Debug_persistent_store_print)
elog(PersistentStore_DebugPrintLevel(),
"PersistentStore_AddTuple: Going to add tuple ('%s', shared data %p)",
storeData->tableName,
storeSharedData);
/* claim the next serial number from the shared counter */
*persistentSerialNum = ++storeSharedData->maxInUseSerialNum;
storeData->myHighestSerialNum = storeSharedData->maxInUseSerialNum;
/* record the new high-water mark in the global sequence */
GlobalSequence_Set(
storeData->gpGlobalSequence,
*persistentSerialNum);
// Overwrite with the new serial number value.
values[storeData->attNumPersistentSerialNum - 1] =
Int64GetDatum(*persistentSerialNum);
/*
 * Add new tuple.
 */
PersistentStore_InsertTuple(
storeData,
storeSharedData,
values,
flushToXLog,
persistentTid);
Assert(ItemPointerIsValid(persistentTid));
storeSharedData->inUseCount++;
if (Debug_persistent_store_print)
elog(PersistentStore_DebugPrintLevel(),
"PersistentStore_AddTuple: Added tuple ('%s', in use count " INT64_FORMAT ", shared data %p)",
storeData->tableName,
storeSharedData->inUseCount,
storeSharedData);
}
示例10: AppendOnlyVisimapStore_Store
/*
 * Stores the visibility map entry.
 *
 * The entry/tuple is invalidated after this function call.
 *
 * Assumes that a valid visimap entry is passed.
 * Assumes that the entry corresponds to the latest tuple
 * returned by AppendOnlyVisimapStore_find.
 *
 * Should not be called twice in the same command.
 */
void
AppendOnlyVisimapStore_Store(
AppendOnlyVisimapStore* visiMapStore,
AppendOnlyVisimapEntry* visiMapEntry)
{
MemoryContext oldContext;
Relation visimapRelation;
TupleDesc heapTupleDesc;
HeapTuple tuple;
Datum values[Natts_pg_aovisimap];
bool nulls[Natts_pg_aovisimap];
Assert(visiMapStore);
Assert(visiMapEntry);
elogif (Debug_appendonly_print_visimap, LOG,
"Append-only visi map store: Store visimap entry: "
"(segFileNum, firstRowNum) = (%u, " INT64_FORMAT ")",
visiMapEntry->segmentFileNum, visiMapEntry->firstRowNum);
/* do all allocations in the store's own context, not the caller's */
oldContext = MemoryContextSwitchTo(visiMapStore->memoryContext);
/* serialize the entry into a values/nulls pair for heap_form_tuple */
AppendOnlyVisimapEntry_Write(visiMapEntry, values,
nulls);
visimapRelation = visiMapStore->visimapRelation;
heapTupleDesc = RelationGetDescr(visimapRelation);
tuple = heap_form_tuple(heapTupleDesc,
values,
nulls);
/*
 * Write out the visimap entry to the relation.
 * If this visimap entry already in the relation, we update
 * the row. Otherwise, a new row is inserted.
 */
if (ItemPointerIsValid(&visiMapEntry->tupleTid))
{
simple_heap_update(visimapRelation, &visiMapEntry->tupleTid, tuple);
}
else
{
simple_heap_insert(visimapRelation, tuple);
}
/* keep the visimap relation's indexes in sync with the new/updated row */
CatalogUpdateIndexes(visimapRelation, tuple);
heap_freetuple(tuple);
MemoryContextSwitchTo(oldContext);
// Invalidate the data after storing it.
ItemPointerSetInvalid(&visiMapEntry->tupleTid);
}
示例11: uint64_to_itemptr
/*
 * Unpack a 64-bit value into an ItemPointer by writing its fields
 * directly: the low MaxHeapTuplesPerPageBits bits are the offset
 * number, and the next 32 bits are the block number, stored as two
 * 16-bit halves (low half first).
 */
static inline void
uint64_to_itemptr(uint64 val, ItemPointer iptr)
{
/* offset number occupies the low bits */
iptr->ip_posid = val & ((1 << MaxHeapTuplesPerPageBits) - 1);
val = val >> MaxHeapTuplesPerPageBits;
/* block number is split across two 16-bit fields */
iptr->ip_blkid.bi_lo = val & 0xFFFF;
val = val >> 16;
iptr->ip_blkid.bi_hi = val & 0xFFFF;
Assert(ItemPointerIsValid(iptr));
}
示例12: InMemHeap_Update
/*
 * update a tuple in in-memory heap table.
 *
 * if the target tuple already in the memory,
 * update it in-place with flag INMEM_HEAP_TUPLE_UPDATED.
 * else report an error.
 *
 * update should not change the otid of the old tuple,
 * since updated tuple should write back to the master and update there.
 */
void
InMemHeap_Update(InMemHeapRelation relation, ItemPointer otid,
HeapTuple tup)
{
int pos;
HeapTuple target;
MemoryContext oldmem = CurrentMemoryContext;
Assert(ItemPointerIsValid(otid));
/* locate the slot holding the tuple with TID 'otid' */
pos = InMemHeap_Find(relation, otid);
/*
 * Allocate the replacement tuple in the relation's own context.
 * NOTE(review): CurrentMemoryContext is assigned directly rather than
 * via MemoryContextSwitchTo - presumably intentional here, but confirm
 * against the project's conventions.
 */
CurrentMemoryContext = relation->memcxt;
/*
 * not found, report error
 */
if (pos >= relation->tupsize)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("update a tuple which does not exist,"
" relname = %s, relid = %u", relation->rel->rd_rel->relname.data,
relation->relid)));
}
Insist(relation->hashIndex == NULL && "cannot handle index in in-memory heap when update");
/*
 * already in table
 */
Assert(relation->tuples[pos].flags == INMEM_HEAP_TUPLE_DISPATCHED
|| relation->tuples[pos].flags == INMEM_HEAP_TUPLE_UPDATED);
relation->tuples[pos].flags = INMEM_HEAP_TUPLE_UPDATED;
/* deep-copy the caller's tuple so we own its storage */
target = heaptuple_copy_to(tup, NULL, NULL );
/*
 * do not modify original tuple header
 */
/* keep the stored tuple's original TID, then take its header verbatim */
ItemPointerCopy(&target->t_self, &relation->tuples[pos].tuple->t_self);
Assert(ItemPointerEquals(&target->t_self, otid));
memcpy(target->t_data, relation->tuples[pos].tuple->t_data,
sizeof(HeapTupleHeaderData));
/* restore the caller's memory context before freeing/replacing */
CurrentMemoryContext = oldmem;
pfree(relation->tuples[pos].tuple);
relation->tuples[pos].tuple = target;
}
示例13: pushIncompleteInsert
/*
 * Record a GiST insertion seen during WAL replay as "incomplete", so
 * that gist_xlog_cleanup can finish it if recovery ends before the
 * matching completion record arrives.
 *
 * The affected block numbers come either from the caller-supplied
 * blkno/lenblk pair or, failing that, from the page-split record xlinfo.
 */
static void
pushIncompleteInsert(RelFileNode node, XLogRecPtr lsn, ItemPointerData key,
BlockNumber *blkno, int lenblk,
PageSplitRecord *xlinfo /* to extract blkno info */ )
{
MemoryContext oldCxt;
gistIncompleteInsert *ninsert;
if (!ItemPointerIsValid(&key))
/*
 * if key is null then we should not store insertion as incomplete,
 * because it's a vacuum operation..
 */
return;
/* allocate in the long-lived recovery context, not the caller's */
oldCxt = MemoryContextSwitchTo(insertCtx);
ninsert = (gistIncompleteInsert *) palloc(sizeof(gistIncompleteInsert));
ninsert->node = node;
ninsert->key = key;
ninsert->lsn = lsn;
if (lenblk && blkno)
{
/* caller supplied the block list directly; copy it */
ninsert->lenblk = lenblk;
ninsert->blkno = (BlockNumber *) palloc(sizeof(BlockNumber) * ninsert->lenblk);
memcpy(ninsert->blkno, blkno, sizeof(BlockNumber) * ninsert->lenblk);
ninsert->origblkno = *blkno;
}
else
{
/* otherwise extract the block numbers from the page-split record */
int i;
Assert(xlinfo);
ninsert->lenblk = xlinfo->data->npage;
ninsert->blkno = (BlockNumber *) palloc(sizeof(BlockNumber) * ninsert->lenblk);
for (i = 0; i < ninsert->lenblk; i++)
ninsert->blkno[i] = xlinfo->page[i].header->blkno;
ninsert->origblkno = xlinfo->data->origblkno;
}
Assert(ninsert->lenblk > 0);
/*
 * Stick the new incomplete insert onto the front of the list, not the
 * back. This is so that gist_xlog_cleanup will process incompletions in
 * last-in-first-out order.
 */
incomplete_inserts = lcons(ninsert, incomplete_inserts);
MemoryContextSwitchTo(oldCxt);
}
示例14: XactLockTableWaitErrorCb
/*
 * XactLockTableWaitErrorContextCb
 * Error context callback for transaction lock waits.
 *
 * 'arg' is a XactLockTableWaitInfo describing which tuple/relation we
 * were waiting on and why; appends a "while ..." line to the error
 * context if that info is complete.
 */
static void
XactLockTableWaitErrorCb(void *arg)
{
XactLockTableWaitInfo *info = (XactLockTableWaitInfo *) arg;
/*
 * We would like to print schema name too, but that would require a
 * syscache lookup.
 */
if (info->oper != XLTW_None &&
ItemPointerIsValid(info->ctid) && RelationIsValid(info->rel))
{
const char *cxt;
/*
 * Pick a message template per wait reason.  gettext_noop marks the
 * strings for translation extraction; errcontext translates at use.
 */
switch (info->oper)
{
case XLTW_Update:
cxt = gettext_noop("while updating tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_Delete:
cxt = gettext_noop("while deleting tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_Lock:
cxt = gettext_noop("while locking tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_LockUpdated:
cxt = gettext_noop("while locking updated version (%u,%u) of tuple in relation \"%s\"");
break;
case XLTW_InsertIndex:
cxt = gettext_noop("while inserting index tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_InsertIndexUnique:
cxt = gettext_noop("while checking uniqueness of tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_FetchUpdated:
cxt = gettext_noop("while rechecking updated tuple (%u,%u) in relation \"%s\"");
break;
case XLTW_RecheckExclusionConstr:
cxt = gettext_noop("while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"");
break;
default:
/* unknown wait reason: add no context line */
return;
}
errcontext(cxt,
ItemPointerGetBlockNumber(info->ctid),
ItemPointerGetOffsetNumber(info->ctid),
RelationGetRelationName(info->rel));
}
}
示例15: itemptr_to_uint64
static inline uint64
itemptr_to_uint64(const ItemPointer iptr)
{
uint64 val;
Assert(ItemPointerIsValid(iptr));
Assert(GinItemPointerGetOffsetNumber(iptr) < (1 << MaxHeapTuplesPerPageBits));
val = GinItemPointerGetBlockNumber(iptr);
val <<= MaxHeapTuplesPerPageBits;
val |= GinItemPointerGetOffsetNumber(iptr);
return val;
}