This article collects typical usage examples of the C++ function RelationGetRelationName. If you have been wondering what exactly RelationGetRelationName does and how to use it, the hand-picked code examples below should help.
The following presents 15 code examples of the RelationGetRelationName function, sorted by popularity by default.
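Before the examples, a quick note on what the function actually is: in the PostgreSQL sources, RelationGetRelationName is a macro defined in src/include/utils/rel.h that expands to NameStr((relation)->rd_rel->relname), i.e. it returns the relation's name as a plain C string. A minimal sketch of the typical logging pattern, assuming a PostgreSQL backend build environment (report_relation is a hypothetical helper, not part of PostgreSQL):

#include "postgres.h"
#include "utils/rel.h"

/* Hypothetical helper: log which relation we are working on. */
static void
report_relation(Relation rel)
{
	/*
	 * The returned string points into the relcache entry, so it is valid
	 * only while the Relation stays open; copy it if you need to keep it.
	 */
	elog(DEBUG1, "processing relation \"%s\"", RelationGetRelationName(rel));
}

As the examples below show, this is almost always how the macro appears: interpolated into elog/ereport messages so that errors name the index or table involved.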
Example 1: gistUserPicksplit
/*
 * Calls the user picksplit method for the attno column to split the vector
 * into two vectors. May use data from columns attno+n to get a better split.
 * Returns TRUE with v->spl_equiv == NULL if the left and right unions of the
 * attno column are the same, so the caller may look for a better split.
 * Returns TRUE with v->spl_equiv != NULL if there are tuples that may be
 * moved freely.
 */
static bool
gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v,
IndexTuple *itup, int len, GISTSTATE *giststate)
{
GIST_SPLITVEC *sv = &v->splitVector;
/*
 * now let the user-defined picksplit function set up the split vector; note
 * that entryvec contains no null values!
 */
sv->spl_ldatum_exists = (v->spl_lisnull[attno]) ? false : true;
sv->spl_rdatum_exists = (v->spl_risnull[attno]) ? false : true;
sv->spl_ldatum = v->spl_lattr[attno];
sv->spl_rdatum = v->spl_rattr[attno];
FunctionCall2Coll(&giststate->picksplitFn[attno],
giststate->supportCollation[attno],
PointerGetDatum(entryvec),
PointerGetDatum(sv));
if (sv->spl_nleft == 0 || sv->spl_nright == 0)
{
ereport(DEBUG1,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("picksplit method for column %d of index \"%s\" failed",
attno + 1, RelationGetRelationName(r)),
errhint("The index is not optimal. To optimize it, contact a developer, or try to use the column as the second one in the CREATE INDEX command.")));
/*
 * Reinitialize GIST_SPLITVEC. Although these fields are not used by
 * genericPickSplit(), set them up for further processing.
 */
sv->spl_ldatum_exists = (v->spl_lisnull[attno]) ? false : true;
sv->spl_rdatum_exists = (v->spl_risnull[attno]) ? false : true;
sv->spl_ldatum = v->spl_lattr[attno];
sv->spl_rdatum = v->spl_rattr[attno];
genericPickSplit(giststate, entryvec, sv, attno);
if (sv->spl_ldatum_exists || sv->spl_rdatum_exists)
supportSecondarySplit(r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno]);
}
else
{
/* compatibility with old code */
if (sv->spl_left[sv->spl_nleft - 1] == InvalidOffsetNumber)
sv->spl_left[sv->spl_nleft - 1] = (OffsetNumber) (entryvec->n - 1);
if (sv->spl_right[sv->spl_nright - 1] == InvalidOffsetNumber)
sv->spl_right[sv->spl_nright - 1] = (OffsetNumber) (entryvec->n - 1);
if (sv->spl_ldatum_exists || sv->spl_rdatum_exists)
{
elog(LOG, "picksplit method for column %d of index \"%s\" doesn't support secondary split",
attno + 1, RelationGetRelationName(r));
supportSecondarySplit(r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno]);
}
}
v->spl_lattr[attno] = sv->spl_ldatum;
v->spl_rattr[attno] = sv->spl_rdatum;
v->spl_lisnull[attno] = false;
v->spl_risnull[attno] = false;
/*
 * if the index is multikey, then we must try to get a smaller bounding box
 * for the subkey(s)
 */
v->spl_equiv = NULL;
if (giststate->tupdesc->natts > 1 && attno + 1 != giststate->tupdesc->natts)
{
if (gistKeyIsEQ(giststate, attno, sv->spl_ldatum, sv->spl_rdatum))
{
/*
 * The left and right keys' unions are equal, so we can get a better
 * split using the following columns. Note that the unions for the
 * attno column are already done.
 */
return true;
}
else
{
int LenEquiv;
v->spl_equiv = (bool *) palloc0(sizeof(bool) * (entryvec->n + 1));
LenEquiv = gistfindgroup(r, giststate, entryvec->vector, v, attno);
/*
//......... part of the code omitted here .........
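For context, here is a hedged sketch of how a caller might consume the protocol described in the header comment above (a hypothetical fragment; the real caller is gistSplitByKey in PostgreSQL's gistsplit.c, whose details may differ):

/* assumes the same r, entryvec, attno, v, itup, len, giststate as above */
if (gistUserPicksplit(r, entryvec, attno, v, itup, len, giststate))
{
	if (v->spl_equiv == NULL)
	{
		/* left and right unions for attno are equal: refine the
		 * split using the next column */
	}
	else
	{
		/* v->spl_equiv marks tuples that may go to either side; use
		 * them to rebalance the split */
	}
}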
Example 2: ginbuild
IndexBuildResult *
ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
IndexBuildResult *result;
double reltuples;
GinBuildState buildstate;
Buffer RootBuffer,
MetaBuffer;
ItemPointerData *list;
Datum key;
GinNullCategory category;
uint32 nlist;
MemoryContext oldCtx;
OffsetNumber attnum;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
initGinState(&buildstate.ginstate, index);
buildstate.indtuples = 0;
memset(&buildstate.buildStats, 0, sizeof(GinStatsData));
/* initialize the meta page */
MetaBuffer = GinNewBuffer(index);
/* initialize the root page */
RootBuffer = GinNewBuffer(index);
START_CRIT_SECTION();
GinInitMetabuffer(MetaBuffer);
MarkBufferDirty(MetaBuffer);
GinInitBuffer(RootBuffer, GIN_LEAF);
MarkBufferDirty(RootBuffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
Page page;
XLogBeginInsert();
XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT);
XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT);
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX);
page = BufferGetPage(RootBuffer);
PageSetLSN(page, recptr);
page = BufferGetPage(MetaBuffer);
PageSetLSN(page, recptr);
}
UnlockReleaseBuffer(MetaBuffer);
UnlockReleaseBuffer(RootBuffer);
END_CRIT_SECTION();
/* count the root as first entry page */
buildstate.buildStats.nEntryPages++;
/*
* create a temporary memory context that is used to hold data not yet
* dumped out to the index
*/
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* create a temporary memory context that is used for calling
* ginExtractEntries(), and can be reset after each tuple
*/
buildstate.funcCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin build temporary context for user-defined function",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
buildstate.accum.ginstate = &buildstate.ginstate;
ginInitBA(&buildstate.accum);
/*
* Do the heap scan. We disallow sync scan here because dataPlaceToPage
* prefers to receive tuples in TID order.
*/
reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
ginBuildCallback, (void *) &buildstate);
/* dump remaining entries to the index */
oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx);
ginBeginBAScan(&buildstate.accum);
while ((list = ginGetBAEntry(&buildstate.accum,
&attnum, &key, &category, &nlist)) != NULL)
{
/* there could be many entries, so be willing to abort here */
CHECK_FOR_INTERRUPTS();
ginEntryInsert(&buildstate.ginstate, attnum, key, category,
list, nlist, &buildstate.buildStats);
//......... part of the code omitted here .........
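The two AllocSetContextCreate calls above follow a standard backend idiom: scratch allocations go into a dedicated context that can be reset wholesale instead of pfree'ing each piece. A minimal sketch of that pattern, using the same memory-context API as the example (a fragment, not a complete function):

MemoryContext tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
											 "per-tuple scratch space",
											 ALLOCSET_DEFAULT_MINSIZE,
											 ALLOCSET_DEFAULT_INITSIZE,
											 ALLOCSET_DEFAULT_MAXSIZE);
MemoryContext oldCtx = MemoryContextSwitchTo(tmpCtx);
/* ... palloc() scratch data freely here ... */
MemoryContextSwitchTo(oldCtx);
MemoryContextReset(tmpCtx);		/* discard everything in one call */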
Example 3: lazy_truncate_heap
/*
* lazy_truncate_heap - try to truncate off any empty pages at the end
*/
static void
lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
{
BlockNumber old_rel_pages = vacrelstats->rel_pages;
BlockNumber new_rel_pages;
PGRUsage ru0;
int lock_retry;
pg_rusage_init(&ru0);
/*
* Loop until no more truncating can be done.
*/
do
{
/*
* We need full exclusive lock on the relation in order to do
* truncation. If we can't get it, give up rather than waiting --- we
* don't want to block other backends, and we don't want to deadlock
* (which is quite possible considering we already hold a lower-grade
* lock).
*/
vacrelstats->lock_waiter_detected = false;
lock_retry = 0;
while (true)
{
if (ConditionalLockRelation(onerel, AccessExclusiveLock))
break;
/*
* Check for interrupts while trying to (re-)acquire the exclusive
* lock.
*/
CHECK_FOR_INTERRUPTS();
if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
{
/*
* We failed to establish the lock in the specified number of
* retries. This means we give up truncating.
*/
vacrelstats->lock_waiter_detected = true;
ereport(elevel,
(errmsg("\"%s\": stopping truncate due to conflicting lock request",
RelationGetRelationName(onerel))));
return;
}
pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
}
/*
* Now that we have exclusive lock, look to see if the rel has grown
* whilst we were vacuuming with non-exclusive lock. If so, give up;
* the newly added pages presumably contain non-deletable tuples.
*/
new_rel_pages = RelationGetNumberOfBlocks(onerel);
if (new_rel_pages != old_rel_pages)
{
/*
* Note: we intentionally don't update vacrelstats->rel_pages with
* the new rel size here. If we did, it would amount to assuming
* that the new pages are empty, which is unlikely. Leaving the
* numbers alone amounts to assuming that the new pages have the
* same tuple density as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;
}
/*
* Scan backwards from the end to verify that the end pages actually
* contain no tuples. This is *necessary*, not optional, because
* other backends could have added tuples to these pages whilst we
* were vacuuming.
*/
new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
if (new_rel_pages >= old_rel_pages)
{
/* can't do anything after all */
UnlockRelation(onerel, AccessExclusiveLock);
return;
}
/*
* Okay to truncate.
*/
RelationTruncate(onerel, new_rel_pages);
/*
* We can release the exclusive lock as soon as we have truncated.
* Other backends can't safely access the relation until they have
* processed the smgr invalidation that smgrtruncate sent out ... but
* that should happen as part of standard invalidation processing once
* they acquire lock on the relation.
//......... part of the code omitted here .........
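The lock-acquisition loop above is a bounded-retry pattern worth noting on its own: take the lock only if it is immediately available, and give up after a fixed budget rather than blocking other backends. A hedged sketch (MAX_RETRIES is a hypothetical cap; the real code derives it from VACUUM_TRUNCATE_LOCK_TIMEOUT and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL):

int		retry = 0;

while (!ConditionalLockRelation(onerel, AccessExclusiveLock))
{
	CHECK_FOR_INTERRUPTS();
	if (++retry > MAX_RETRIES)	/* hypothetical retry budget */
		return;					/* give up instead of blocking others */
	pg_usleep(50000L);			/* wait 50 ms, then try again */
}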
Example 4: PrintRelCacheLeakWarning
/*
* Debugging subroutine
*/
static void
PrintRelCacheLeakWarning(Relation rel)
{
elog(WARNING, "relcache reference leak: relation \"%s\" not closed",
RelationGetRelationName(rel));
}
Example 5: spgGetCache
/*
* Fetch local cache of AM-specific info about the index, initializing it
* if necessary
*/
SpGistCache *
spgGetCache(Relation index)
{
SpGistCache *cache;
if (index->rd_amcache == NULL)
{
Oid atttype;
spgConfigIn in;
FmgrInfo *procinfo;
Buffer metabuffer;
SpGistMetaPageData *metadata;
cache = MemoryContextAllocZero(index->rd_indexcxt,
sizeof(SpGistCache));
/* SPGiST doesn't support multi-column indexes */
Assert(index->rd_att->natts == 1);
/*
* Get the actual data type of the indexed column from the index
* tupdesc. We pass this to the opclass config function so that
* polymorphic opclasses are possible.
*/
atttype = index->rd_att->attrs[0]->atttypid;
/* Call the config function to get config info for the opclass */
in.attType = atttype;
procinfo = index_getprocinfo(index, 1, SPGIST_CONFIG_PROC);
FunctionCall2Coll(procinfo,
index->rd_indcollation[0],
PointerGetDatum(&in),
PointerGetDatum(&cache->config));
/* Get the information we need about each relevant datatype */
fillTypeDesc(&cache->attType, atttype);
fillTypeDesc(&cache->attPrefixType, cache->config.prefixType);
fillTypeDesc(&cache->attLabelType, cache->config.labelType);
/* Last, get the lastUsedPages data from the metapage */
metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);
LockBuffer(metabuffer, BUFFER_LOCK_SHARE);
metadata = SpGistPageGetMeta(BufferGetPage(metabuffer));
if (metadata->magicNumber != SPGIST_MAGIC_NUMBER)
elog(ERROR, "index \"%s\" is not an SP-GiST index",
RelationGetRelationName(index));
cache->lastUsedPages = metadata->lastUsedPages;
UnlockReleaseBuffer(metabuffer);
index->rd_amcache = (void *) cache;
}
else
{
/* assume it's up to date */
cache = (SpGistCache *) index->rd_amcache;
}
return cache;
}
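The rd_amcache idiom above generalizes to any index access method: memory allocated in index->rd_indexcxt lives as long as the relcache entry, so metadata that is expensive to compute can be stashed there once and reused. A minimal sketch under that assumption (MyAmCacheData and my_get_cache are hypothetical names, not PostgreSQL ones):

typedef struct MyAmCacheData
{
	int		expensive_config;	/* hypothetical AM-specific field */
} MyAmCacheData;

static MyAmCacheData *
my_get_cache(Relation index)
{
	MyAmCacheData *cache;

	if (index->rd_amcache == NULL)
	{
		cache = MemoryContextAllocZero(index->rd_indexcxt,
									   sizeof(MyAmCacheData));
		cache->expensive_config = 42;	/* one-time setup goes here */
		index->rd_amcache = (void *) cache;
	}
	else
		cache = (MyAmCacheData *) index->rd_amcache;	/* assume up to date */

	return cache;
}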
Example 6: gistbuild
/*
* Main entry point to GiST index build. Initially calls insert over and over,
* but switches to more efficient buffering build algorithm after a certain
* number of tuples (unless buffering mode is disabled).
*/
Datum
gistbuild(PG_FUNCTION_ARGS)
{
Relation heap = (Relation) PG_GETARG_POINTER(0);
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
double reltuples;
GISTBuildState buildstate;
Buffer buffer;
Page page;
MemoryContext oldcxt = CurrentMemoryContext;
int fillfactor;
buildstate.indexrel = index;
if (index->rd_options)
{
/* Get buffering mode from the options string */
GiSTOptions *options = (GiSTOptions *) index->rd_options;
char *bufferingMode = (char *) options + options->bufferingModeOffset;
if (strcmp(bufferingMode, "on") == 0)
buildstate.bufferingMode = GIST_BUFFERING_STATS;
else if (strcmp(bufferingMode, "off") == 0)
buildstate.bufferingMode = GIST_BUFFERING_DISABLED;
else
buildstate.bufferingMode = GIST_BUFFERING_AUTO;
fillfactor = options->fillfactor;
}
else
{
/*
* By default, switch to buffering mode when the index grows too large
* to fit in cache.
*/
buildstate.bufferingMode = GIST_BUFFERING_AUTO;
fillfactor = GIST_DEFAULT_FILLFACTOR;
}
/* Calculate target amount of free space to leave on pages */
buildstate.freespace = BLCKSZ * (100 - fillfactor) / 100;
/*
 * We expect to be called exactly once for any index relation. If that's
 * not the case, we have big trouble.
 */
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/*
* We can't yet handle unlogged GiST indexes, because we depend on LSNs.
* This is duplicative of an error in gistbuildempty, but we want to check
* here so as to throw error before doing all the index-build work.
*/
if (heap->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unlogged GiST indexes are not supported")));
/* no locking is needed */
buildstate.giststate = initGISTstate(index);
/*
* Create a temporary memory context that is reset once for each tuple
* processed. (Note: we don't bother to make this a child of the
* giststate's scanCxt, so we have to delete it separately at the end.)
*/
buildstate.giststate->tempCxt = createTempGistContext();
/* initialize the root page */
buffer = gistNewBuffer(index);
Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
page = BufferGetPage(buffer);
START_CRIT_SECTION();
GISTInitBuffer(buffer, F_LEAF);
MarkBufferDirty(buffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata;
rdata.data = (char *) &(index->rd_node);
rdata.len = sizeof(RelFileNode);
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX, &rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
//......... part of the code omitted here .........
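As a worked example of the free-space target computed above (assuming the standard 8192-byte BLCKSZ; GIST_DEFAULT_FILLFACTOR is 90):

int		fillfactor = 90;
Size	freespace = BLCKSZ * (100 - fillfactor) / 100;
/* with BLCKSZ = 8192: 8192 * 10 / 100 = 819 bytes are left free on each
 * page, so later insertions have room before a split is forced */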
Example 7: entrySplitPage
/*
 * Places the tuple and splits the page; the original buffer (lbuf) is left
 * untouched, and a shadow page of lbuf filled with the new data is returned.
 * Tuples are distributed between the pages by equal size, not by an equal
 * number of tuples!
 */
static Page
entrySplitPage(RumBtree btree, Buffer lbuf, Buffer rbuf,
Page lPage, Page rPage, OffsetNumber off)
{
OffsetNumber i,
maxoff,
separator = InvalidOffsetNumber;
Size totalsize = 0;
Size lsize = 0,
size;
char *ptr;
IndexTuple itup,
leftrightmost = NULL;
Page page;
Page newlPage = PageGetTempPageCopy(lPage);
Size pageSize = PageGetPageSize(newlPage);
static char tupstore[2 * BLCKSZ];
entryPreparePage(btree, newlPage, off);
maxoff = PageGetMaxOffsetNumber(newlPage);
ptr = tupstore;
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
if (i == off)
{
size = MAXALIGN(IndexTupleSize(btree->entry));
memcpy(ptr, btree->entry, size);
ptr += size;
totalsize += size + sizeof(ItemIdData);
}
itup = (IndexTuple) PageGetItem(newlPage, PageGetItemId(newlPage, i));
size = MAXALIGN(IndexTupleSize(itup));
memcpy(ptr, itup, size);
ptr += size;
totalsize += size + sizeof(ItemIdData);
}
if (off == maxoff + 1)
{
size = MAXALIGN(IndexTupleSize(btree->entry));
memcpy(ptr, btree->entry, size);
totalsize += size + sizeof(ItemIdData);
}
RumInitPage(rPage, RumPageGetOpaque(newlPage)->flags, pageSize);
RumInitPage(newlPage, RumPageGetOpaque(rPage)->flags, pageSize);
ptr = tupstore;
maxoff++;
lsize = 0;
page = newlPage;
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
itup = (IndexTuple) ptr;
if (lsize > totalsize / 2)
{
if (separator == InvalidOffsetNumber)
separator = i - 1;
page = rPage;
}
else
{
leftrightmost = itup;
lsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
}
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
RelationGetRelationName(btree->index));
ptr += MAXALIGN(IndexTupleSize(itup));
}
btree->entry = RumFormInteriorTuple(btree, leftrightmost, newlPage,
BufferGetBlockNumber(lbuf));
btree->rightblkno = BufferGetBlockNumber(rbuf);
return newlPage;
}
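A small worked example of the split-by-size rule in the second loop above (the numbers are illustrative only):

/* Suppose totalsize = 6000 bytes and the tuples, in order, occupy 2500,
 * 2000 and 1500 bytes (line pointers included). The loop checks lsize
 * before adding each tuple:
 *   i = 1: lsize = 0    <= 3000  -> stays left,  lsize becomes 2500
 *   i = 2: lsize = 2500 <= 3000  -> stays left,  lsize becomes 4500
 *   i = 3: lsize = 4500 >  3000  -> separator = 2, tuple goes right
 * The pages end up roughly balanced by bytes (4500 vs 1500 here, since the
 * rule is greedy), not by tuple count. */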
Example 8: apply_handle_update
//......... part of the code omitted here .........
ensure_transaction();
relid = logicalrep_read_update(s, &has_oldtup, &oldtup,
&newtup);
rel = logicalrep_rel_open(relid, RowExclusiveLock);
if (!should_apply_changes_for_rel(rel))
{
/*
* The relation can't become interesting in the middle of the
* transaction so it's safe to unlock it.
*/
logicalrep_rel_close(rel, RowExclusiveLock);
return;
}
/* Check if we can do the update. */
check_relation_updatable(rel);
/* Initialize the executor state. */
estate = create_estate_for_relation(rel);
remoteslot = ExecInitExtraTupleSlot(estate,
RelationGetDescr(rel->localrel));
localslot = ExecInitExtraTupleSlot(estate,
RelationGetDescr(rel->localrel));
EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
PushActiveSnapshot(GetTransactionSnapshot());
ExecOpenIndices(estate->es_result_relation_info, false);
/* Build the search tuple. */
oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
slot_store_cstrings(remoteslot, rel,
has_oldtup ? oldtup.values : newtup.values);
MemoryContextSwitchTo(oldctx);
/*
* Try to find tuple using either replica identity index, primary key or
* if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
(rel->remoterel.replident == REPLICA_IDENTITY_FULL && has_oldtup));
if (OidIsValid(idxoid))
found = RelationFindReplTupleByIndex(rel->localrel, idxoid,
LockTupleExclusive,
remoteslot, localslot);
else
found = RelationFindReplTupleSeq(rel->localrel, LockTupleExclusive,
remoteslot, localslot);
ExecClearTuple(remoteslot);
/*
* Tuple found.
*
* Note this will fail if there are other conflicting unique indexes.
*/
if (found)
{
/* Process and store remote tuple in the slot */
oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
ExecStoreTuple(localslot->tts_tuple, remoteslot, InvalidBuffer, false);
slot_modify_cstrings(remoteslot, rel, newtup.values, newtup.changed);
MemoryContextSwitchTo(oldctx);
EvalPlanQualSetSlot(&epqstate, remoteslot);
/* Do the actual update. */
ExecSimpleRelationUpdate(estate, &epqstate, localslot, remoteslot);
}
else
{
/*
* The tuple to be updated could not be found.
*
* TODO what to do here, change the log level to LOG perhaps?
*/
elog(DEBUG1,
"logical replication did not find row for update "
"in replication target relation \"%s\"",
RelationGetRelationName(rel->localrel));
}
/* Cleanup. */
ExecCloseIndices(estate->es_result_relation_info);
PopActiveSnapshot();
/* Handle queued AFTER triggers. */
AfterTriggerEndQuery(estate);
EvalPlanQualEnd(&epqstate);
ExecResetTupleTable(estate->es_tupleTable, false);
FreeExecutorState(estate);
logicalrep_rel_close(rel, NoLock);
CommandCounterIncrement();
}
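GetRelationIdentityOrPK, used above to pick the lookup index, is essentially a two-step fallback. A hedged sketch of that logic (simplified and renamed; see the real definition in PostgreSQL's logical replication worker code):

static Oid
get_identity_or_pk(Relation rel)	/* hypothetical name for this sketch */
{
	Oid			idxoid;

	/* prefer the configured replica identity index ... */
	idxoid = RelationGetReplicaIndex(rel);

	/* ... and fall back to the primary key if none is set */
	if (!OidIsValid(idxoid))
		idxoid = RelationGetPrimaryKeyIndex(rel);

	return idxoid;
}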
Example 9: XLogOpenRelation
/*
* Open a relation during XLOG replay
*/
Relation
XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
{
XLogRelDesc *res;
XLogRelCacheEntry *hentry;
bool found;
hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (void *) &rnode, HASH_FIND, NULL);
if (hentry)
{
res = hentry->rdesc;
res->lessRecently->moreRecently = res->moreRecently;
res->moreRecently->lessRecently = res->lessRecently;
}
else
{
res = _xl_new_reldesc();
sprintf(RelationGetRelationName(&(res->reldata)), "%u", rnode.relNode);
res->reldata.rd_node = rnode;
/*
* We set up the lockRelId in case anything tries to lock the
* dummy relation. Note that this is fairly bogus since relNode
* may be different from the relation's OID. It shouldn't really
* matter though, since we are presumably running by ourselves and
* can't have any lock conflicts ...
*/
res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode;
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
elog(PANIC, "XLogOpenRelation: out of memory for cache");
if (found)
elog(PANIC, "XLogOpenRelation: file found on insert into cache");
hentry->rdesc = res;
res->reldata.rd_targblock = InvalidBlockNumber;
res->reldata.rd_smgr = NULL;
RelationOpenSmgr(&(res->reldata));
/*
* Create the target file if it doesn't already exist. This lets
* us cope if the replay sequence contains writes to a relation
* that is later deleted. (The original coding of this routine
* would instead return NULL, causing the writes to be suppressed.
* But that seems like it risks losing valuable data if the
* filesystem loses an inode during a crash. Better to write the
* data until we are actually told to delete the file.)
*/
smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true);
}
res->moreRecently = &(_xlrelarr[0]);
res->lessRecently = _xlrelarr[0].lessRecently;
_xlrelarr[0].lessRecently = res;
res->lessRecently->moreRecently = res;
return (&(res->reldata));
}
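Note the unusual use of RelationGetRelationName as a write target here: because the macro expands to the char array inside a NameData field (rd_rel->relname), this old recovery code can sprintf the relNode number into it as a stand-in name. A minimal sketch of that property (assuming the standard NameData type and NAMEDATALEN from the PostgreSQL headers):

NameData	fakename;

/* NameStr() yields the NAMEDATALEN-byte char buffer inside a NameData. */
snprintf(NameStr(fakename), NAMEDATALEN, "%u", 16384);	/* e.g. a relNode */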
Example 10: publication_add_relation
/*
* Insert new publication / relation mapping.
*/
ObjectAddress
publication_add_relation(Oid pubid, Relation targetrel,
bool if_not_exists)
{
Relation rel;
HeapTuple tup;
Datum values[Natts_pg_publication_rel];
bool nulls[Natts_pg_publication_rel];
Oid relid = RelationGetRelid(targetrel);
Oid prrelid;
Publication *pub = GetPublication(pubid);
ObjectAddress myself,
referenced;
rel = table_open(PublicationRelRelationId, RowExclusiveLock);
/*
 * Check for duplicates. Note that this does not really prevent
 * duplicates, it's here just to provide a nicer error message in the
 * common case. The real protection is the unique key on the catalog.
 */
if (SearchSysCacheExists2(PUBLICATIONRELMAP, ObjectIdGetDatum(relid),
ObjectIdGetDatum(pubid)))
{
table_close(rel, RowExclusiveLock);
if (if_not_exists)
return InvalidObjectAddress;
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" is already member of publication \"%s\"",
RelationGetRelationName(targetrel), pub->name)));
}
check_publication_add_relation(targetrel);
/* Form a tuple. */
memset(values, 0, sizeof(values));
memset(nulls, false, sizeof(nulls));
prrelid = GetNewOidWithIndex(rel, PublicationRelObjectIndexId,
Anum_pg_publication_rel_oid);
values[Anum_pg_publication_rel_oid - 1] = ObjectIdGetDatum(prrelid);
values[Anum_pg_publication_rel_prpubid - 1] =
ObjectIdGetDatum(pubid);
values[Anum_pg_publication_rel_prrelid - 1] =
ObjectIdGetDatum(relid);
tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
/* Insert tuple into catalog. */
CatalogTupleInsert(rel, tup);
heap_freetuple(tup);
ObjectAddressSet(myself, PublicationRelRelationId, prrelid);
/* Add dependency on the publication */
ObjectAddressSet(referenced, PublicationRelationId, pubid);
recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
/* Add dependency on the relation */
ObjectAddressSet(referenced, RelationRelationId, relid);
recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
/* Close the table. */
table_close(rel, RowExclusiveLock);
/* Invalidate relcache so that publication info is rebuilt. */
CacheInvalidateRelcache(targetrel);
return myself;
}
Example 11: initGinState
/*
* initGinState: fill in an empty GinState struct to describe the index
*
* Note: assorted subsidiary data is allocated in the CurrentMemoryContext.
*/
void
initGinState(GinState *state, Relation index)
{
TupleDesc origTupdesc = RelationGetDescr(index);
int i;
MemSet(state, 0, sizeof(GinState));
state->index = index;
state->oneCol = (origTupdesc->natts == 1) ? true : false;
state->origTupdesc = origTupdesc;
for (i = 0; i < origTupdesc->natts; i++)
{
if (state->oneCol)
state->tupdesc[i] = state->origTupdesc;
else
{
state->tupdesc[i] = CreateTemplateTupleDesc(2, false);
TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL,
INT2OID, -1, 0);
TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL,
origTupdesc->attrs[i]->atttypid,
origTupdesc->attrs[i]->atttypmod,
origTupdesc->attrs[i]->attndims);
TupleDescInitEntryCollation(state->tupdesc[i], (AttrNumber) 2,
origTupdesc->attrs[i]->attcollation);
}
/*
* If the compare proc isn't specified in the opclass definition, look
* up the index key type's default btree comparator.
*/
if (index_getprocid(index, i + 1, GIN_COMPARE_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->compareFn[i]),
index_getprocinfo(index, i + 1, GIN_COMPARE_PROC),
CurrentMemoryContext);
}
else
{
TypeCacheEntry *typentry;
typentry = lookup_type_cache(origTupdesc->attrs[i]->atttypid,
TYPECACHE_CMP_PROC_FINFO);
if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("could not identify a comparison function for type %s",
format_type_be(origTupdesc->attrs[i]->atttypid))));
fmgr_info_copy(&(state->compareFn[i]),
&(typentry->cmp_proc_finfo),
CurrentMemoryContext);
}
/* Opclass must always provide extract procs */
fmgr_info_copy(&(state->extractValueFn[i]),
index_getprocinfo(index, i + 1, GIN_EXTRACTVALUE_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(state->extractQueryFn[i]),
index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC),
CurrentMemoryContext);
/*
 * Check the opclass's capability to do tri-state or binary-logic
 * consistent checks.
 */
if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->triConsistentFn[i]),
index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
CurrentMemoryContext);
}
if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->consistentFn[i]),
index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
CurrentMemoryContext);
}
if (state->consistentFn[i].fn_oid == InvalidOid &&
state->triConsistentFn[i].fn_oid == InvalidOid)
{
elog(ERROR, "missing GIN support function (%d or %d) for attribute %d of index \"%s\"",
GIN_CONSISTENT_PROC, GIN_TRICONSISTENT_PROC,
i + 1, RelationGetRelationName(index));
}
/*
* Check opclass capability to do partial match.
*/
if (index_getprocid(index, i + 1, GIN_COMPARE_PARTIAL_PROC) != InvalidOid)
{
//......... part of the code omitted here .........
Example 12: pgstatginindex
/* ------------------------------------------------------
* pgstatginindex()
*
* Usage: SELECT * FROM pgstatginindex('ginindex');
* ------------------------------------------------------
*/
Datum
pgstatginindex(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
Buffer buffer;
Page page;
GinMetaPageData *metadata;
GinIndexStat stats;
HeapTuple tuple;
TupleDesc tupleDesc;
Datum values[3];
bool nulls[3] = {false, false, false};
Datum result;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to use pgstattuple functions"))));
rel = relation_open(relid, AccessShareLock);
if (!IS_INDEX(rel) || !IS_GIN(rel))
elog(ERROR, "relation \"%s\" is not a GIN index",
RelationGetRelationName(rel));
/*
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
* session's local buffers.
*/
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary indexes of other sessions")));
/*
* Read metapage
*/
buffer = ReadBuffer(rel, GIN_METAPAGE_BLKNO);
LockBuffer(buffer, GIN_SHARE);
page = BufferGetPage(buffer);
metadata = GinPageGetMeta(page);
stats.version = metadata->ginVersion;
stats.pending_pages = metadata->nPendingPages;
stats.pending_tuples = metadata->nPendingHeapTuples;
UnlockReleaseBuffer(buffer);
relation_close(rel, AccessShareLock);
/*
* Build a tuple descriptor for our result type
*/
if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
values[0] = Int32GetDatum(stats.version);
values[1] = UInt32GetDatum(stats.pending_pages);
values[2] = Int64GetDatum(stats.pending_tuples);
/*
* Build and return the tuple
*/
tuple = heap_form_tuple(tupleDesc, values, nulls);
result = HeapTupleGetDatum(tuple);
PG_RETURN_DATUM(result);
}
Example 13: pgstatindex_impl
static Datum
pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo)
{
Datum result;
BlockNumber nblocks;
BlockNumber blkno;
BTIndexStat indexStat;
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
if (!IS_INDEX(rel) || !IS_BTREE(rel))
elog(ERROR, "relation \"%s\" is not a btree index",
RelationGetRelationName(rel));
/*
* Reject attempts to read non-local temporary relations; we would be
* likely to get wrong data since we have no visibility into the owning
* session's local buffers.
*/
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot access temporary tables of other sessions")));
/*
* Read metapage
*/
{
Buffer buffer = ReadBufferExtended(rel, MAIN_FORKNUM, 0, RBM_NORMAL, bstrategy);
Page page = BufferGetPage(buffer);
BTMetaPageData *metad = BTPageGetMeta(page);
indexStat.version = metad->btm_version;
indexStat.level = metad->btm_level;
indexStat.root_blkno = metad->btm_root;
ReleaseBuffer(buffer);
}
/* -- init counters -- */
indexStat.root_pages = 0;
indexStat.internal_pages = 0;
indexStat.leaf_pages = 0;
indexStat.empty_pages = 0;
indexStat.deleted_pages = 0;
indexStat.max_avail = 0;
indexStat.free_space = 0;
indexStat.fragments = 0;
/*
* Scan all blocks except the metapage
*/
nblocks = RelationGetNumberOfBlocks(rel);
for (blkno = 1; blkno < nblocks; blkno++)
{
Buffer buffer;
Page page;
BTPageOpaque opaque;
CHECK_FOR_INTERRUPTS();
/* Read and lock buffer */
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Determine page type, and update totals */
if (P_ISLEAF(opaque))
{
int max_avail;
max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
indexStat.max_avail += max_avail;
indexStat.free_space += PageGetFreeSpace(page);
indexStat.leaf_pages++;
/*
 * If the next leaf is on an earlier block, it means fragmentation.
 */
if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
indexStat.fragments++;
}
else if (P_ISDELETED(opaque))
indexStat.deleted_pages++;
else if (P_IGNORE(opaque))
indexStat.empty_pages++;
else if (P_ISROOT(opaque))
indexStat.root_pages++;
else
indexStat.internal_pages++;
/* Unlock and release buffer */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
//......... part of the code omitted here .........
Example 14: _hash_metapinit
/*
* _hash_metapinit() -- Initialize the metadata page of a hash index,
* the initial buckets, and the initial bitmap page.
*
* The initial number of buckets is dependent on num_tuples, an estimate
* of the number of tuples to be loaded into the index initially. The
* chosen number of buckets is returned.
*
* We are fairly cavalier about locking here, since we know that no one else
* could be accessing this index. In particular the rule about not holding
* multiple buffer locks is ignored.
*/
uint32
_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
{
HashMetaPage metap;
HashPageOpaque pageopaque;
Buffer metabuf;
Buffer buf;
Page pg;
int32 data_width;
int32 item_width;
int32 ffactor;
double dnumbuckets;
uint32 num_buckets;
uint32 log2_num_buckets;
uint32 i;
/* safety check */
if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
RelationGetRelationName(rel));
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
* as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
/* keep to a sane range */
if (ffactor < 10)
ffactor = 10;
/*
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* next power of 2, however, and always force at least 2 bucket pages. The
* upper limit is determined by considerations explained in
* _hash_expandtable().
*/
dnumbuckets = num_tuples / ffactor;
if (dnumbuckets <= 2.0)
num_buckets = 2;
else if (dnumbuckets >= (double) 0x40000000)
num_buckets = 0x40000000;
else
num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);
log2_num_buckets = _hash_log2(num_buckets);
Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
* calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
pg = BufferGetPage(metabuf);
pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
pageopaque->hasho_prevblkno = InvalidBlockNumber;
pageopaque->hasho_nextblkno = InvalidBlockNumber;
pageopaque->hasho_bucket = -1;
pageopaque->hasho_flag = LH_META_PAGE;
pageopaque->hasho_page_id = HASHO_PAGE_ID;
metap = HashPageGetMeta(pg);
metap->hashm_magic = HASH_MAGIC;
metap->hashm_version = HASH_VERSION;
metap->hashm_ntuples = 0;
metap->hashm_nmaps = 0;
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(pg);
/* find largest bitmap array size that will fit in page size */
for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
{
if ((1 << i) <= metap->hashm_bsize)
break;
}
Assert(i > 0);
metap->hashm_bmsize = 1 << i;
metap->hashm_bmshift = i + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
//......... part of the code omitted here .........
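A worked example of the bucket sizing described in the header comment (the input numbers are illustrative):

/* Suppose num_tuples = 100000 and the computed ffactor = 80 tuples/bucket:
 *   dnumbuckets = 100000 / 80 = 1250.0
 *   _hash_log2(1250) = 11, since 2^10 = 1024 < 1250 <= 2048 = 2^11
 *   num_buckets = 1 << 11 = 2048
 * i.e. the estimate is rounded up to the next power of 2 and clamped to
 * the range [2, 0x40000000]. */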
Example 15: apply_handle_delete
/*
* Handle DELETE message.
*
* TODO: FDW support
*/
static void
apply_handle_delete(StringInfo s)
{
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
Oid idxoid;
EState *estate;
EPQState epqstate;
TupleTableSlot *remoteslot;
TupleTableSlot *localslot;
bool found;
MemoryContext oldctx;
ensure_transaction();
relid = logicalrep_read_delete(s, &oldtup);
rel = logicalrep_rel_open(relid, RowExclusiveLock);
if (!should_apply_changes_for_rel(rel))
{
/*
* The relation can't become interesting in the middle of the
* transaction so it's safe to unlock it.
*/
logicalrep_rel_close(rel, RowExclusiveLock);
return;
}
/* Check if we can do the delete. */
check_relation_updatable(rel);
/* Initialize the executor state. */
estate = create_estate_for_relation(rel);
remoteslot = ExecInitExtraTupleSlot(estate,
RelationGetDescr(rel->localrel));
localslot = ExecInitExtraTupleSlot(estate,
RelationGetDescr(rel->localrel));
EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
PushActiveSnapshot(GetTransactionSnapshot());
ExecOpenIndices(estate->es_result_relation_info, false);
/* Find the tuple using the replica identity index. */
oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
slot_store_cstrings(remoteslot, rel, oldtup.values);
MemoryContextSwitchTo(oldctx);
/*
* Try to find tuple using either replica identity index, primary key or
* if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
(rel->remoterel.replident == REPLICA_IDENTITY_FULL));
if (OidIsValid(idxoid))
found = RelationFindReplTupleByIndex(rel->localrel, idxoid,
LockTupleExclusive,
remoteslot, localslot);
else
found = RelationFindReplTupleSeq(rel->localrel, LockTupleExclusive,
remoteslot, localslot);
/* If found delete it. */
if (found)
{
EvalPlanQualSetSlot(&epqstate, localslot);
/* Do the actual delete. */
ExecSimpleRelationDelete(estate, &epqstate, localslot);
}
else
{
/* The tuple to be deleted could not be found. */
ereport(DEBUG1,
(errmsg("logical replication could not find row for delete "
"in replication target relation \"%s\"",
RelationGetRelationName(rel->localrel))));
}
/* Cleanup. */
ExecCloseIndices(estate->es_result_relation_info);
PopActiveSnapshot();
/* Handle queued AFTER triggers. */
AfterTriggerEndQuery(estate);
EvalPlanQualEnd(&epqstate);
ExecResetTupleTable(estate->es_tupleTable, false);
FreeExecutorState(estate);
logicalrep_rel_close(rel, NoLock);
CommandCounterIncrement();
}