This article collects and organizes typical usage examples of the ExecStoreTuple function in C++. If you have been wondering how ExecStoreTuple is used in C++ or what it is good for, the hand-picked code examples below may help.
A total of 15 ExecStoreTuple code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
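Before diving into the examples, here is a minimal sketch of the call pattern most of them share: build a standalone slot from a relation's tuple descriptor, store each scanned tuple in it with ExecStoreTuple, read attributes through slot_getattr, then drop the slot. This is a hedged sketch, not code from any project below; it assumes the pre-PostgreSQL-12 executor API (later releases replace ExecStoreTuple with ExecStoreHeapTuple/ExecStoreBufferHeapTuple), and my_rel, scan_with_slot, and the attribute number 1 are placeholders.
/*
 * Hedged sketch: scan a table, store each tuple in a standalone slot, and
 * look at its first attribute. The relation is assumed to be opened and
 * locked by the caller.
 */
static void
scan_with_slot(Relation my_rel)
{
    HeapScanDesc    scan;
    HeapTuple       tup;
    TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(my_rel));

    scan = heap_beginscan(my_rel, GetTransactionSnapshot(), 0, NULL);
    while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        Datum   d;
        bool    isnull;

        /* InvalidBuffer: the tuple is not on a pinned buffer page;
         * false: the slot must not pfree() the tuple when cleared. */
        ExecStoreTuple(tup, slot, InvalidBuffer, false);

        d = slot_getattr(slot, 1, &isnull);
        if (!isnull)
            elog(DEBUG1, "first attribute is non-null");    /* decode d with the matching DatumGetXxx() */
    }

    ExecDropSingleTupleTableSlot(slot);
    heap_endscan(scan);
}
The same pattern, with the buffer argument filled in and shouldFree varying, is what Examples 1, 5, 6, 8, 10, and 14 below build on.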
Example 1: get_all_brokers
/*
* get_all_brokers
*
* Return a list of all brokers in pipeline_kafka_brokers
*/
static List *
get_all_brokers(void)
{
HeapTuple tup = NULL;
HeapScanDesc scan;
Relation brokers = open_pipeline_kafka_brokers();
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(brokers));
List *result = NIL;
scan = heap_beginscan(brokers, GetTransactionSnapshot(), 0, NULL);
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
char *host;
Datum d;
bool isnull;
ExecStoreTuple(tup, slot, InvalidBuffer, false);
d = slot_getattr(slot, BROKER_ATTR_HOST, &isnull);
host = TextDatumGetCString(d);
result = lappend(result, host);
}
ExecDropSingleTupleTableSlot(slot);
heap_endscan(scan);
heap_close(brokers, NoLock);
return result;
}
Example 2: SpoolerInsert
void
SpoolerInsert(Spooler *self, HeapTuple tuple)
{
/* Spool keys in the tuple */
ExecStoreTuple(tuple, self->slot, InvalidBuffer, false);
IndexSpoolInsert(self->spools, self->slot, &(tuple->t_self), self->estate, true);
BULKLOAD_PROFILE(&prof_writer_index);
}
Example 3: FunctionNext
/* ----------------------------------------------------------------
* FunctionNext
*
* This is a workhorse for ExecFunctionScan
* ----------------------------------------------------------------
*/
static TupleTableSlot *
FunctionNext(FunctionScanState *node)
{
TupleTableSlot *slot;
EState *estate;
ScanDirection direction;
Tuplestorestate *tuplestorestate;
bool should_free;
HeapTuple heapTuple;
/*
* get information from the estate and scan state
*/
estate = node->ss.ps.state;
direction = estate->es_direction;
tuplestorestate = node->tuplestorestate;
/*
* If first time through, read all tuples from function and put them
* in a tuplestore. Subsequent calls just fetch tuples from
* tuplestore.
*/
if (tuplestorestate == NULL)
{
ExprContext *econtext = node->ss.ps.ps_ExprContext;
TupleDesc funcTupdesc;
node->tuplestorestate = tuplestorestate =
ExecMakeTableFunctionResult(node->funcexpr,
econtext,
node->tupdesc,
&funcTupdesc);
/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as
* well do it always.
*/
if (funcTupdesc && !tupledesc_match(node->tupdesc, funcTupdesc))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("query-specified return row and actual function return row do not match")));
}
/*
* Get the next tuple from tuplestore. Return NULL if no more tuples.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
ScanDirectionIsForward(direction),
&should_free);
slot = node->ss.ss_ScanTupleSlot;
return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
}
Example 4: FunctionNext
/* ----------------------------------------------------------------
* FunctionNext
*
* This is a workhorse for ExecFunctionScan
* ----------------------------------------------------------------
*/
static TupleTableSlot *
FunctionNext(FunctionScanState *node)
{
TupleTableSlot *slot;
EState *estate;
ScanDirection direction;
Tuplestorestate *tuplestorestate;
bool should_free;
HeapTuple heapTuple;
/*
* get information from the estate and scan state
*/
estate = node->ss.ps.state;
direction = estate->es_direction;
tuplestorestate = node->tuplestorestate;
/*
* If first time through, read all tuples from function and put them in a
* tuplestore. Subsequent calls just fetch tuples from tuplestore.
*/
if (tuplestorestate == NULL)
{
ExprContext *econtext = node->ss.ps.ps_ExprContext;
TupleDesc funcTupdesc;
node->tuplestorestate = tuplestorestate =
ExecMakeTableFunctionResult(node->funcexpr,
econtext,
node->tupdesc,
&funcTupdesc);
/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as well
* do it always.
*/
if (funcTupdesc)
tupledesc_match(node->tupdesc, funcTupdesc);
}
/*
* Get the next tuple from tuplestore. Return NULL if no more tuples.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
ScanDirectionIsForward(direction),
&should_free);
slot = node->ss.ss_ScanTupleSlot;
if (heapTuple)
return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
else
return ExecClearTuple(slot);
}
Example 5: SeqNext
/* ----------------------------------------------------------------
* SeqNext
*
* This is a workhorse for ExecSeqScan
* ----------------------------------------------------------------
*/
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
EState *estate;
ScanDirection direction;
TupleTableSlot *slot;
/*
* get information from the estate and scan state
*/
scandesc = node->ss.ss_currentScanDesc;
estate = node->ss.ps.state;
direction = estate->es_direction;
slot = node->ss.ss_ScanTupleSlot;
if (scandesc == NULL)
{
/*
* We reach here if the scan is not parallel, or if we're executing a
* scan that was intended to be parallel serially.
*/
scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot,
0, NULL);
node->ss.ss_currentScanDesc = scandesc;
}
/*
* get the next tuple from the table
*/
tuple = heap_getnext(scandesc, direction);
/*
* save the tuple and the buffer returned to us by the access methods in
* our scan tuple slot and return the slot. Note: we pass 'false' because
* tuples returned by heap_getnext() are pointers onto disk pages and were
* not created with palloc() and so should not be pfree()'d. Note also
* that ExecStoreTuple will increment the refcount of the buffer; the
* refcount will not be dropped until the tuple table slot is cleared.
*/
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->rs_cbuf, /* buffer associated with this
* tuple */
false); /* don't pfree this pointer */
else
ExecClearTuple(slot);
return slot;
}
Example 6: load_consumer_offsets
/*
* load_consumer_offsets
*
* Load all offsets for all of this consumer's partitions
*/
static void
load_consumer_offsets(KafkaConsumer *consumer, struct rd_kafka_metadata_topic *meta, int64_t offset)
{
MemoryContext old;
ScanKeyData skey[1];
HeapTuple tup = NULL;
HeapScanDesc scan;
Relation offsets = open_pipeline_kafka_offsets();
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(offsets));
int i;
ScanKeyInit(&skey[0], OFFSETS_ATTR_CONSUMER, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(consumer->id));
scan = heap_beginscan(offsets, GetTransactionSnapshot(), 1, skey);
old = MemoryContextSwitchTo(CacheMemoryContext);
consumer->offsets = palloc0(meta->partition_cnt * sizeof(int64_t));
MemoryContextSwitchTo(old);
/* by default, begin consuming from the end of a stream */
for (i = 0; i < meta->partition_cnt; i++)
consumer->offsets[i] = offset;
consumer->num_partitions = meta->partition_cnt;
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Datum d;
bool isnull;
int partition;
ExecStoreTuple(tup, slot, InvalidBuffer, false);
d = slot_getattr(slot, OFFSETS_ATTR_PARTITION, &isnull);
partition = DatumGetInt32(d);
if (partition >= consumer->num_partitions)
elog(ERROR, "invalid partition id: %d", partition);
if (offset == RD_KAFKA_OFFSET_NULL)
{
d = slot_getattr(slot, OFFSETS_ATTR_OFFSET, &isnull);
if (isnull)
offset = RD_KAFKA_OFFSET_END;
else
offset = DatumGetInt64(d);
}
consumer->offsets[partition] = offset;
}
ExecDropSingleTupleTableSlot(slot);
heap_endscan(scan);
heap_close(offsets, RowExclusiveLock);
}
Example 7: ExecRecFetch
/*
* ExecRecFetch -- fetch next potential tuple
*
* This routine is concerned with substituting a test tuple if we are
* inside an EvalPlanQual recheck. If we aren't, just execute
* the access method's next-tuple routine.
*
* This function is identical to ExecScanFetch; it exists separately only
* to avoid possible duplicate-symbol issues.
*/
static inline TupleTableSlot *
ExecRecFetch(ScanState *node,
ExecScanAccessMtd accessMtd,
ExecScanRecheckMtd recheckMtd)
{
EState *estate = node->ps.state;
if (estate->es_epqTuple != NULL)
{
/*
* We are inside an EvalPlanQual recheck. Return the test tuple if
* one is available, after rechecking any access-method-specific
* conditions.
*/
Index scanrelid = ((Scan *) node->ps.plan)->scanrelid;
Assert(scanrelid > 0);
if (estate->es_epqTupleSet[scanrelid - 1])
{
TupleTableSlot *slot = node->ss_ScanTupleSlot;
/* Return empty slot if we already returned a tuple */
if (estate->es_epqScanDone[scanrelid - 1])
return ExecClearTuple(slot);
/* Else mark to remember that we shouldn't return more */
estate->es_epqScanDone[scanrelid - 1] = true;
/* Return empty slot if we haven't got a test tuple */
if (estate->es_epqTuple[scanrelid - 1] == NULL)
return ExecClearTuple(slot);
/* Store test tuple in the plan node's scan slot */
ExecStoreTuple(estate->es_epqTuple[scanrelid - 1],
slot, InvalidBuffer, false);
/* Check if it meets the access-method conditions */
if (!(*recheckMtd) (node, slot))
ExecClearTuple(slot); /* would not be returned by scan */
return slot;
}
}
/*
* Run the node-type-specific access method function to get the next tuple
*/
return (*accessMtd) (node);
}
Example 8: gather_getnext
/*
* Read the next tuple. We might fetch a tuple from one of the tuple queues
* using gather_readnext, or if no tuple queue contains a tuple and the
* single_copy flag is not set, we might generate one locally instead.
*/
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
PlanState *outerPlan = outerPlanState(gatherstate);
TupleTableSlot *outerTupleSlot;
TupleTableSlot *fslot = gatherstate->funnel_slot;
HeapTuple tup;
while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
{
CHECK_FOR_INTERRUPTS();
if (gatherstate->nreaders > 0)
{
tup = gather_readnext(gatherstate);
if (HeapTupleIsValid(tup))
{
ExecStoreTuple(tup, /* tuple to store */
fslot, /* slot in which to store the tuple */
InvalidBuffer, /* buffer associated with this
* tuple */
true); /* pfree tuple when done with it */
return fslot;
}
}
if (gatherstate->need_to_scan_locally)
{
EState *estate = gatherstate->ps.state;
/* Install our DSA area while executing the plan. */
estate->es_query_dsa =
gatherstate->pei ? gatherstate->pei->area : NULL;
outerTupleSlot = ExecProcNode(outerPlan);
estate->es_query_dsa = NULL;
if (!TupIsNull(outerTupleSlot))
return outerTupleSlot;
gatherstate->need_to_scan_locally = false;
}
}
return ExecClearTuple(fslot);
}
Example 9: ExecCopySlot
/* --------------------------------
* ExecCopySlot
* Copy the source slot's contents into the destination slot.
*
* The destination acquires a private copy that will not go away
* if the source is cleared.
*
* The caller must ensure the slots have compatible tupdescs.
* --------------------------------
*/
TupleTableSlot *
ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
HeapTuple newTuple;
MemoryContext oldContext;
/*
* There might be ways to optimize this when the source is virtual, but
* for now just always build a physical copy. Make sure it is in the
* right context.
*/
oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt);
newTuple = ExecCopySlotTuple(srcslot);
MemoryContextSwitchTo(oldContext);
return ExecStoreTuple(newTuple, dstslot, InvalidBuffer, true);
}
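As a usage note, here is a hedged sketch (not code from this page) of a common caller-side pattern for ExecCopySlot: a node that must remember the tuple it last returned, in the style of nodeUnique, copies the scanned slot into its own result slot so the copy survives after the child node clears or reuses its slot. The helper name fetch_and_copy and its arguments are illustrative only.
/* Hedged sketch: fetch the child's next tuple and keep a private copy of it. */
static TupleTableSlot *
fetch_and_copy(PlanState *node, TupleTableSlot *resultSlot)
{
    TupleTableSlot *slot = ExecProcNode(outerPlanState(node));

    if (TupIsNull(slot))
        return ExecClearTuple(resultSlot);

    /* The copy lives in resultSlot's memory context, independent of 'slot'. */
    return ExecCopySlot(resultSlot, slot);
}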
Example 10: CopyIntoStream
/*
* CopyIntoStream
*
* COPY events to a stream from an input source
*/
void
CopyIntoStream(Relation rel, TupleDesc desc, HeapTuple *tuples, int ntuples)
{
bool snap = ActiveSnapshotSet();
ResultRelInfo rinfo;
StreamInsertState *sis;
MemSet(&rinfo, 0, sizeof(ResultRelInfo));
rinfo.ri_RangeTableIndex = 1; /* dummy */
rinfo.ri_TrigDesc = NULL;
rinfo.ri_RelationDesc = rel;
if (snap)
PopActiveSnapshot();
BeginStreamModify(NULL, &rinfo, list_make1(desc), 0, 0);
sis = (StreamInsertState *) rinfo.ri_FdwState;
Assert(sis);
if (sis->queries)
{
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel));
int i;
for (i = 0; i < ntuples; i++)
{
ExecStoreTuple(tuples[i], slot, InvalidBuffer, false);
ExecStreamInsert(NULL, &rinfo, slot, NULL);
ExecClearTuple(slot);
}
ExecDropSingleTupleTableSlot(slot);
Assert(sis->ntups == ntuples);
pgstat_increment_cq_write(ntuples, sis->nbytes);
}
EndStreamModify(NULL, &rinfo);
if (snap)
PushActiveSnapshot(GetTransactionSnapshot());
}
Example 11: gather_getnext
/*
* Read the next tuple. We might fetch a tuple from one of the tuple queues
* using gather_readnext, or if no tuple queue contains a tuple and the
* single_copy flag is not set, we might generate one locally instead.
*/
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
PlanState *outerPlan = outerPlanState(gatherstate);
TupleTableSlot *outerTupleSlot;
TupleTableSlot *fslot = gatherstate->funnel_slot;
HeapTuple tup;
while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
{
if (gatherstate->reader != NULL)
{
tup = gather_readnext(gatherstate);
if (HeapTupleIsValid(tup))
{
ExecStoreTuple(tup, /* tuple to store */
fslot, /* slot in which to store the tuple */
InvalidBuffer, /* buffer associated with this
* tuple */
true); /* pfree this pointer if not from heap */
return fslot;
}
}
if (gatherstate->need_to_scan_locally)
{
outerTupleSlot = ExecProcNode(outerPlan);
if (!TupIsNull(outerTupleSlot))
return outerTupleSlot;
gatherstate->need_to_scan_locally = false;
}
}
return ExecClearTuple(fslot);
}
Example 12: CheckerConstraints
HeapTuple
CheckerConstraints(Checker *checker, HeapTuple tuple, int *parsing_field)
{
if (checker->has_constraints)
{
*parsing_field = 0;
/* Place tuple in tuple slot */
ExecStoreTuple(tuple, checker->slot, InvalidBuffer, false);
/* Check the constraints of the tuple */
ExecConstraints(checker->resultRelInfo, checker->slot, checker->estate);
}
else if (checker->has_not_null && HeapTupleHasNulls(tuple))
{
/*
* Even if CHECK_CONSTRAINTS is not specified, check NOT NULL constraint
*/
TupleDesc desc = checker->desc;
int i;
for (i = 0; i < desc->natts; i++)
{
if (desc->attrs[i]->attnotnull &&
att_isnull(i, tuple->t_data->t_bits))
{
*parsing_field = i + 1; /* 1 origin */
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
NameStr(desc->attrs[i]->attname))));
}
}
}
return tuple;
}
Example 13: ExecMaterializeSlot
/* --------------------------------
* ExecMaterializeSlot
* Force a slot into the "materialized" state.
*
* This causes the slot's tuple to be a local copy not dependent on
* any external storage. A pointer to the contained tuple is returned.
*
* A typical use for this operation is to prepare a computed tuple
* for being stored on disk. The original data may or may not be
* virtual, but in any case we need a private copy for heap_insert
* to scribble on.
* --------------------------------
*/
HeapTuple
ExecMaterializeSlot(TupleTableSlot *slot)
{
HeapTuple newTuple;
MemoryContext oldContext;
/*
* sanity checks
*/
Assert(slot != NULL);
Assert(!slot->tts_isempty);
/*
* If we have a physical tuple, and it's locally palloc'd, we have nothing
* to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree)
return slot->tts_tuple;
/*
* Otherwise, copy or build a tuple, and then store it as the new slot
* value. (Note: tts_nvalid will be reset to zero here. There are cases
* in which this could be optimized but it's probably not worth worrying
* about.)
*
* We may be called in a context that is shorter-lived than the tuple
* slot, but we have to ensure that the materialized tuple will survive
* anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);
MemoryContextSwitchTo(oldContext);
ExecStoreTuple(newTuple, slot, InvalidBuffer, true);
return slot->tts_tuple;
}
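A hedged sketch of the caller-side pattern the comment above describes: materialize a computed slot so that the resulting locally palloc'd HeapTuple can be handed to the heap-insert routines (roughly what DestReceiver callbacks such as intorel_receive do). This assumes pre-PostgreSQL-12 APIs, and targetrel and insert_slot_contents are placeholders; the relation is assumed to be opened by the caller.
/* Hedged sketch: persist a slot's current contents into a heap relation. */
static void
insert_slot_contents(Relation targetrel, TupleTableSlot *slot)
{
    /* Returns a locally palloc'd tuple that the insert routine may scribble on. */
    HeapTuple tuple = ExecMaterializeSlot(slot);

    simple_heap_insert(targetrel, tuple);
}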
Example 14: save_consumer_state
/*
* save_consumer_state
*
* Saves the given consumer's state to pipeline_kafka_consumers
*/
static void
save_consumer_state(KafkaConsumer *consumer, int partition_group)
{
ScanKeyData skey[1];
HeapTuple tup = NULL;
HeapScanDesc scan;
Relation offsets = open_pipeline_kafka_offsets();
Datum values[OFFSETS_RELATION_NATTS];
bool nulls[OFFSETS_RELATION_NATTS];
bool replace[OFFSETS_RELATION_NATTS];
bool updated[consumer->num_partitions];
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(offsets));
int partition;
MemSet(updated, false, sizeof(updated));
ScanKeyInit(&skey[0], OFFSETS_ATTR_CONSUMER, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(consumer->id));
scan = heap_beginscan(offsets, GetTransactionSnapshot(), 1, skey);
/* update any existing offset rows */
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Datum d;
bool isnull;
int partition;
HeapTuple modified;
ExecStoreTuple(tup, slot, InvalidBuffer, false);
d = slot_getattr(slot, OFFSETS_ATTR_PARTITION, &isnull);
partition = DatumGetInt32(d);
/* we only want to update the offsets we're responsible for */
if (partition % consumer->parallelism != partition_group)
continue;
MemSet(nulls, false, sizeof(nulls));
MemSet(replace, false, sizeof(replace));
values[OFFSETS_ATTR_OFFSET - 1] = Int64GetDatum(consumer->offsets[partition]);
replace[OFFSETS_ATTR_OFFSET - 1] = true;
updated[partition] = true;
modified = heap_modify_tuple(tup, RelationGetDescr(offsets), values, nulls, replace);
simple_heap_update(offsets, &modified->t_self, modified);
}
heap_endscan(scan);
/* now insert any offset rows that didn't already exist */
for (partition = 0; partition < consumer->num_partitions; partition++)
{
if (updated[partition])
continue;
if (partition % consumer->parallelism != partition_group)
continue;
values[OFFSETS_ATTR_CONSUMER - 1] = ObjectIdGetDatum(consumer->id);
values[OFFSETS_ATTR_PARTITION - 1] = Int32GetDatum(partition);
values[OFFSETS_ATTR_OFFSET - 1] = Int64GetDatum(consumer->offsets[partition]);
MemSet(nulls, false, sizeof(nulls));
tup = heap_form_tuple(RelationGetDescr(offsets), values, nulls);
simple_heap_insert(offsets, tup);
}
ExecDropSingleTupleTableSlot(slot);
heap_close(offsets, NoLock);
}
Example 15: SeqNext
/* ----------------------------------------------------------------
* SeqNext
*
* This is a workhorse for ExecSeqScan
* ----------------------------------------------------------------
*/
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
Index scanrelid;
EState *estate;
ScanDirection direction;
TupleTableSlot *slot;
/*
* get information from the estate and scan state
*/
estate = node->ps.state;
scandesc = node->ss_currentScanDesc;
scanrelid = ((SeqScan *) node->ps.plan)->scanrelid;
direction = estate->es_direction;
slot = node->ss_ScanTupleSlot;
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle SeqScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
estate->es_evTuple[scanrelid - 1] != NULL)
{
if (estate->es_evTupleNull[scanrelid - 1])
return ExecClearTuple(slot);
ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
slot, InvalidBuffer, false);
/*
* Note that unlike IndexScan, SeqScan never uses keys in
* heap_beginscan (and this is very bad) - so here we do not check
* whether the keys are OK.
*/
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
return slot;
}
/*
* get the next tuple from the access methods
*/
scandesc->estate = estate;
scandesc->slot = slot;
scandesc->qual = node->ps.qual;
tuple = heap_getnext(scandesc, direction);
/*
* save the tuple and the buffer returned to us by the access methods in
* our scan tuple slot and return the slot. Note: we pass 'false' because
* tuples returned by heap_getnext() are pointers onto disk pages and were
* not created with palloc() and so should not be pfree()'d. Note also
* that ExecStoreTuple will increment the refcount of the buffer; the
* refcount will not be dropped until the tuple table slot is cleared.
*/
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
scandesc->rs_cbuf, /* buffer associated with this
* tuple */
false); /* don't pfree this pointer */
else
ExecClearTuple(slot);
return slot;
}