This article collects typical usage examples of the relation_open function in C++. If you have been wondering what relation_open does, how to call it, or what real code that uses it looks like, the curated examples below should help.
Fifteen code examples of the relation_open function are shown below, sorted by popularity by default.
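Before the examples, here is a minimal sketch of the pattern they all share: open the relation by OID with an appropriate lock mode, read what you need from the relcache entry, and close it with the same lock mode. This sketch is not taken from any example below; the helper name get_relkind_demo is hypothetical, and the header that declares relation_open varies by PostgreSQL version (access/heapam.h on older releases, access/relation.h on newer ones).

#include "postgres.h"
#include "access/heapam.h"      /* relation_open / relation_close (older releases) */
#include "storage/lockdefs.h"   /* AccessShareLock */
#include "utils/rel.h"          /* Relation, rd_rel */

/* Hypothetical helper: return the relkind of a relation given its OID. */
static char
get_relkind_demo(Oid relid)
{
    /* Take AccessShareLock so the relation cannot be dropped while we inspect it */
    Relation rel = relation_open(relid, AccessShareLock);
    char relkind = rel->rd_rel->relkind;

    /* Release the relcache reference together with the lock */
    relation_close(rel, AccessShareLock);
    return relkind;
}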
Example 1: calculate_toast_table_size
/*
* Calculate total on-disk size of a TOAST relation, including its index.
* Must not be applied to non-TOAST relations.
*/
static int64 calculate_toast_table_size(oid_t toastrelid)
{
int64 size = 0;
struct relation * toastRel;
struct relation * toastIdxRel;
enum fork fnr;
toastRel = relation_open(toastrelid, ACCESS_SHR_LOCK);
/* toast heap size, including FSM and VM size */
for (fnr = 0; fnr <= MAX_FORK_NR; fnr++)
size += calculate_relation_size(&(toastRel->rd_node),
toastRel->rd_backend, fnr);
/* toast index size, including FSM and VM size */
toastIdxRel = relation_open(toastRel->rd_rel->reltoastidxid, ACCESS_SHR_LOCK);
for (fnr = 0; fnr <= MAX_FORK_NR; fnr++)
size += calculate_relation_size(&(toastIdxRel->rd_node),
toastIdxRel->rd_backend, fnr);
relation_close(toastIdxRel, ACCESS_SHR_LOCK);
relation_close(toastRel, ACCESS_SHR_LOCK);
return size;
}
Example 2: calculate_indexes_size
/*
* Calculate total on-disk size of all indexes attached to the given table.
*
* Can be applied safely to an index, but you'll just get zero.
*/
static int64
calculate_indexes_size(Oid relOid)
{
int64 size = 0;
Relation rel;
rel = relation_open(relOid, AccessShareLock);
/*
* Aggregate all indexes on the given relation
*/
if (rel->rd_rel->relhasindex)
{
List *index_oids = RelationGetIndexList(rel);
ListCell *cell;
foreach(cell, index_oids)
{
Oid idxOid = lfirst_oid(cell);
Relation idxRel;
ForkNumber forkNum;
idxRel = relation_open(idxOid, AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(idxRel->rd_node),
idxRel->rd_backend,
forkNum);
relation_close(idxRel, AccessShareLock);
}
list_free(index_oids);
}
relation_close(rel, AccessShareLock);
return size;
}
Example 3: calculate_indexes_size
/*
* Calculate total on-disk size of all indexes attached to the given table.
*
* Can be applied safely to an index, but you'll just get zero.
*/
static int64 calculate_indexes_size(oid_t relOid)
{
int64 size = 0;
struct relation * rel;
rel = relation_open(relOid, ACCESS_SHR_LOCK);
/*
* Aggregate all indexes on the given relation
*/
if (rel->rd_rel->relhasindex) {
struct list *index_oids = rel_get_index_list(rel);
struct list_cell *cell;
foreach(cell, index_oids) {
oid_t idxOid = lfirst_oid(cell);
struct relation * idxRel;
enum fork fnr;
idxRel = relation_open(idxOid, ACCESS_SHR_LOCK);
for (fnr = 0; fnr <= MAX_FORK_NR; fnr++)
size += calculate_relation_size(&(idxRel->rd_node),
idxRel->rd_backend, fnr);
relation_close(idxRel, ACCESS_SHR_LOCK);
}
list_free(index_oids);
}
relation_close(rel, ACCESS_SHR_LOCK);
return size;
}
Example 4: calculate_total_relation_size
/*
* Compute the on-disk size of files for the relation according to the
* stat function, including heap data, index data, and toast data.
*/
static int64
calculate_total_relation_size(Oid Relid)
{
Relation heapRel;
Oid toastOid;
int64 size;
ListCell *cell;
heapRel = relation_open(Relid, AccessShareLock);
toastOid = heapRel->rd_rel->reltoastrelid;
/* Get the heap size */
size = calculate_relation_size(&(heapRel->rd_node));
/* Include any dependent indexes */
if (heapRel->rd_rel->relhasindex)
{
List *index_oids = RelationGetIndexList(heapRel);
foreach(cell, index_oids)
{
Oid idxOid = lfirst_oid(cell);
Relation iRel;
iRel = relation_open(idxOid, AccessShareLock);
size += calculate_relation_size(&(iRel->rd_node));
relation_close(iRel, AccessShareLock);
}
list_free(index_oids);
}
/* Recursively include toast table (and index) size */
if (OidIsValid(toastOid))
size += calculate_total_relation_size(toastOid);
relation_close(heapRel, AccessShareLock);
return size;
}
Example 5: calculate_toast_table_size
/*
* Calculate total on-disk size of a TOAST relation, including its index.
* Must not be applied to non-TOAST relations.
*/
static int64
calculate_toast_table_size(Oid toastrelid)
{
int64 size = 0;
Relation toastRel;
Relation toastIdxRel;
ForkNumber forkNum;
toastRel = relation_open(toastrelid, AccessShareLock);
/* toast heap size, including FSM and VM size */
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastRel->rd_node),
toastRel->rd_backend, forkNum);
/* toast index size, including FSM and VM size */
toastIdxRel = relation_open(toastRel->rd_rel->reltoastidxid, AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastIdxRel->rd_node),
toastIdxRel->rd_backend, forkNum);
relation_close(toastIdxRel, AccessShareLock);
relation_close(toastRel, AccessShareLock);
return size;
}
Example 6: calculate_toast_table_size
/*
* Calculate total on-disk size of a TOAST relation, including its indexes.
* Must not be applied to non-TOAST relations.
*/
static int64
calculate_toast_table_size(Oid toastrelid)
{
int64 size = 0;
Relation toastRel;
ForkNumber forkNum;
ListCell *lc;
List *indexlist;
toastRel = relation_open(toastrelid, AccessShareLock);
/* toast heap size, including FSM and VM size */
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastRel->rd_node),
toastRel->rd_backend, forkNum);
/* toast index size, including FSM and VM size */
indexlist = RelationGetIndexList(toastRel);
/* Size is calculated using all the indexes available */
foreach(lc, indexlist)
{
Relation toastIdxRel;
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastIdxRel->rd_node),
toastIdxRel->rd_backend, forkNum);
relation_close(toastIdxRel, AccessShareLock);
}
list_free(indexlist);
relation_close(toastRel, AccessShareLock);
return size;
}
Example 7: table_key_index
/* Returns the relation object for the index that we're going to use as key for a
* particular table. (Indexes are relations too!) Returns null if the table is unkeyed.
* The return value is opened with a shared lock; call relation_close() when finished. */
Relation table_key_index(Relation rel) {
char replident = rel->rd_rel->relreplident;
Oid repl_ident_oid;
List *indexes;
ListCell *index_oid;
if (replident == REPLICA_IDENTITY_NOTHING) {
return NULL;
}
if (replident == REPLICA_IDENTITY_INDEX) {
repl_ident_oid = RelationGetReplicaIndex(rel);
if (repl_ident_oid != InvalidOid) {
return relation_open(repl_ident_oid, AccessShareLock);
}
}
// There doesn't seem to be a convenient way of getting the primary key index for
// a table, so we have to iterate over all the table's indexes.
indexes = RelationGetIndexList(rel);
foreach(index_oid, indexes) {
Relation index_rel = relation_open(lfirst_oid(index_oid), AccessShareLock);
Form_pg_index index = index_rel->rd_index;
if (IndexIsValid(index) && IndexIsReady(index) && index->indisprimary) {
list_free(indexes);
return index_rel;
}
relation_close(index_rel, AccessShareLock);
}
list_free(indexes);
return NULL;
}
Example 8: pg_truncate_visibility_map
/*
* Remove the visibility map fork for a relation. If there turn out to be
* any bugs in the visibility map code that require rebuilding the VM, this
* provides users with a way to do it that is cleaner than shutting down the
* server and removing files by hand.
*
* This is a cut-down version of RelationTruncate.
*/
Datum
pg_truncate_visibility_map(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
rel = relation_open(relid, AccessExclusiveLock);
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_MATVIEW &&
rel->rd_rel->relkind != RELKIND_TOASTVALUE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table, materialized view, or TOAST table",
RelationGetRelationName(rel))));
RelationOpenSmgr(rel);
rel->rd_smgr->smgr_vm_nblocks = InvalidBlockNumber;
visibilitymap_truncate(rel, 0);
if (RelationNeedsWAL(rel))
{
xl_smgr_truncate xlrec;
xlrec.blkno = 0;
xlrec.rnode = rel->rd_node;
xlrec.flags = SMGR_TRUNCATE_VM;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, sizeof(xlrec));
XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
}
/*
* Release the lock right away, not at commit time.
*
* It would be a problem to release the lock prior to commit if this
* truncate operation sends any transactional invalidation messages. Other
* backends would potentially be able to lock the relation without
* processing them in the window of time between when we release the lock
* here and when we send the messages at our eventual commit. However,
* we're currently only sending a non-transactional smgr invalidation,
* which will have been posted to shared memory immediately from within
* visibilitymap_truncate. Therefore, there should be no race here.
*
* The reason why it's desirable to release the lock early here is because
* of the possibility that someone will need to use this to blow away many
* visibility map forks at once. If we can't release the lock until
* commit time, the transaction doing this will accumulate
* AccessExclusiveLocks on all of those relations at the same time, which
* is undesirable. However, if this turns out to be unsafe we may have no
* choice...
*/
relation_close(rel, AccessExclusiveLock);
/* Nothing to return. */
PG_RETURN_VOID();
}
Example 9: pg_visibility_map
/*
* Visibility map information for a single block of a relation.
*/
Datum
pg_visibility_map(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
int64 blkno = PG_GETARG_INT64(1);
int32 mapbits;
Relation rel;
Buffer vmbuffer = InvalidBuffer;
TupleDesc tupdesc;
Datum values[2];
bool nulls[2];
rel = relation_open(relid, AccessShareLock);
if (blkno < 0 || blkno > MaxBlockNumber)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid block number")));
tupdesc = pg_visibility_tupdesc(false, false);
MemSet(nulls, 0, sizeof(nulls));
mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
if (vmbuffer != InvalidBuffer)
ReleaseBuffer(vmbuffer);
values[0] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0);
values[1] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0);
relation_close(rel, AccessShareLock);
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}
Example 10: calculate_table_size
/*
* Calculate total on-disk size of a given table,
* including FSM and VM, plus TOAST table if any.
* Indexes other than the TOAST table's index are not included.
*
* Note that this also behaves sanely if applied to an index or toast table;
* those won't have attached toast tables, but they can have multiple forks.
*/
static int64
calculate_table_size(Oid relOid)
{
int64 size = 0;
Relation rel;
ForkNumber forkNum;
rel = relation_open(relOid, AccessShareLock);
/*
* heap size, including FSM and VM
*/
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(rel->rd_node), rel->rd_backend,
forkNum);
/*
* Size of toast relation
*/
if (OidIsValid(rel->rd_rel->reltoastrelid))
size += calculate_toast_table_size(rel->rd_rel->reltoastrelid);
relation_close(rel, AccessShareLock);
return size;
}
Example 11: open_share_lock
/*
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
* we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
static Relation
open_share_lock(SeqTable seq)
{
LocalTransactionId thislxid = MyProc->lxid;
/* Get the lock if not already held in this xact */
if (seq->lxid != thislxid)
{
ResourceOwner currentOwner;
currentOwner = CurrentResourceOwner;
PG_TRY();
{
CurrentResourceOwner = TopTransactionResourceOwner;
LockRelationOid(seq->relid, AccessShareLock);
}
PG_CATCH();
{
/* Ensure CurrentResourceOwner is restored on error */
CurrentResourceOwner = currentOwner;
PG_RE_THROW();
}
PG_END_TRY();
CurrentResourceOwner = currentOwner;
/* Flag that we have a lock in the current xact */
seq->lxid = thislxid;
}
/* We now know we have AccessShareLock, and can safely open the rel */
return relation_open(seq->relid, NoLock);
}
Example 12: collect_visibility_data
/*
* Collect visibility data about a relation.
*/
static vbits *
collect_visibility_data(Oid relid, bool include_pd)
{
Relation rel;
BlockNumber nblocks;
vbits *info;
BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
rel = relation_open(relid, AccessShareLock);
nblocks = RelationGetNumberOfBlocks(rel);
info = palloc0(offsetof(vbits, bits) + nblocks);
info->next = 0;
info->count = nblocks;
for (blkno = 0; blkno < nblocks; ++blkno)
{
int32 mapbits;
/* Make sure we are interruptible. */
CHECK_FOR_INTERRUPTS();
/* Get map info. */
mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0)
info->bits[blkno] |= (1 << 0);
if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
info->bits[blkno] |= (1 << 1);
/*
* Page-level data requires reading every block, so only get it if
* the caller needs it. Use a buffer access strategy, too, to prevent
* cache-trashing.
*/
if (include_pd)
{
Buffer buffer;
Page page;
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
bstrategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
if (PageIsAllVisible(page))
info->bits[blkno] |= (1 << 2);
UnlockReleaseBuffer(buffer);
}
}
/* Clean up. */
if (vmbuffer != InvalidBuffer)
ReleaseBuffer(vmbuffer);
relation_close(rel, AccessShareLock);
return info;
}
Example 13: create_reference_table
/*
* CreateReferenceTable creates a distributed table with the given relationId. The
* created table has one shard and replication factor is set to the active worker
* count. In fact, the above is the definition of a reference table in Citus.
*/
Datum
create_reference_table(PG_FUNCTION_ARGS)
{
Oid relationId = PG_GETARG_OID(0);
Relation relation = NULL;
char *colocateWithTableName = NULL;
List *workerNodeList = NIL;
int workerCount = 0;
Var *distributionColumn = NULL;
bool viaDeprecatedAPI = false;
EnsureCoordinator();
CheckCitusVersion(ERROR);
/*
* Ensure schema exists on each worker node. We can not run this function
* transactionally, since we may create shards over separate sessions and
* shard creation depends on the schema being present and visible from all
* sessions.
*/
EnsureSchemaExistsOnAllNodes(relationId);
/*
* Lock target relation with an exclusive lock - there's no way to make
* sense of this table until we've committed, and we don't want multiple
* backends manipulating this relation.
*/
relation = relation_open(relationId, ExclusiveLock);
/*
* We should do this check here since the codes in the following lines rely
* on this relation to have a supported relation kind. More extensive checks
* will be performed in CreateDistributedTable.
*/
EnsureRelationKindSupported(relationId);
workerNodeList = ActivePrimaryNodeList();
workerCount = list_length(workerNodeList);
/* if there are no workers, error out */
if (workerCount == 0)
{
char *relationName = get_rel_name(relationId);
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot create reference table \"%s\"", relationName),
errdetail("There are no active worker nodes.")));
}
CreateDistributedTable(relationId, distributionColumn, DISTRIBUTE_BY_NONE,
colocateWithTableName, viaDeprecatedAPI);
relation_close(relation, NoLock);
PG_RETURN_VOID();
}
Example 14: check_SPI_gettype
Datum
check_SPI_gettype(PG_FUNCTION_ARGS)
{
int fnumber = PG_GETARG_INT32(0);
Relation rel = relation_open(RelationRelationId, AccessShareLock);
char *name = SPI_gettype(RelationGetDescr(rel), fnumber);
relation_close(rel, AccessShareLock);
PG_RETURN_TEXT_P(cstring_to_text(name));
}
Example 15: lock_test2
Datum
lock_test2(PG_FUNCTION_ARGS)
{
Oid table_oid;
text *lock_type;
float8 sleep_time;
const char *lock_type_str;
LOCKMODE lockmode;
Relation heapRelation;
table_oid = PG_GETARG_OID(0);
lock_type = PG_GETARG_TEXT_P(1);
sleep_time = PG_GETARG_FLOAT8(2);
lock_type_str = text_to_cstring(lock_type);
if (pg_strcasecmp(lock_type_str, "NOLOCK") == 0)
lockmode = NoLock;
else if (pg_strcasecmp(lock_type_str, "ACCESSSHARELOCK") == 0)
lockmode = AccessShareLock;
else if (pg_strcasecmp(lock_type_str, "ROWSHARELOCK") == 0)
lockmode = RowShareLock;
else if (pg_strcasecmp(lock_type_str, "ROWEXCLUSIVELOCK") == 0)
lockmode = RowExclusiveLock;
else if (pg_strcasecmp(lock_type_str, "SHAREUPDATEEXCLUSIVELOCK") == 0)
lockmode = ShareUpdateExclusiveLock;
else if (pg_strcasecmp(lock_type_str, "SHARELOCK") == 0)
lockmode = ShareLock;
else if (pg_strcasecmp(lock_type_str, "SHAREEXCLUSIVELOCK") == 0)
lockmode = ShareRowExclusiveLock;
else if (pg_strcasecmp(lock_type_str, "EXCLUSIVELOCK") == 0)
lockmode = ExclusiveLock;
else if (pg_strcasecmp(lock_type_str, "ACCESSEXCLUSIVELOCK") == 0)
lockmode = AccessExclusiveLock;
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"lock_mode\" is set to invalid string: %s", lock_type_str),
errhint("\"lock_mode\" must select one among NoLock, AccessShareLock, RowShareLock, RowExclusiveLock, ShareUpdateExclusiveLock, ShareLock, ShareExclusiveLock, ExclusiveLock, and AccessExclusiveLock")));
elog(NOTICE, "enter lock %d as %s", table_oid, lock_type_str);
heapRelation = relation_open(table_oid, lockmode);
elog(NOTICE, "succeed locking %d as %s", table_oid, lock_type_str);
pg_usleep(sleep_time * 1000000L);
relation_close(heapRelation, lockmode);
elog(NOTICE, "exit lock %d as %s", table_oid, lock_type_str);
PG_RETURN_VOID();
}