This page collects typical usage examples of the C function LWLockRelease, drawn from PostgreSQL and PostgreSQL-derived projects. If you are wondering what LWLockRelease does, what it is for, or how to call it, the hand-picked examples below should help.
Fifteen LWLockRelease code examples are shown, ordered roughly by popularity.
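Before the examples, here is a minimal sketch of the acquire/release contract all of these snippets follow. Everything in it (the struct, the lock field, the helper) is hypothetical; note that recent PostgreSQL versions pass an LWLock * while older ones pass an LWLockId enum value, and that any LWLock still held when an error aborts the transaction is released automatically by the abort path.

/* Minimal hypothetical sketch: an LWLock-protected shared counter. */
#include "postgres.h"
#include "storage/lwlock.h"

typedef struct MySharedState
{
    LWLock     *lock;           /* protects counter */
    int         counter;
} MySharedState;

static int
my_increment(MySharedState *state)
{
    int         result;

    LWLockAcquire(state->lock, LW_EXCLUSIVE);
    result = ++state->counter;
    LWLockRelease(state->lock); /* always release before returning */

    return result;
}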
Example 1: SimpleLruReadPage
/*
* Find a page in a shared buffer, reading it in if necessary.
* The page number must correspond to an already-initialized page.
*
* If write_ok is true then it is OK to return a page that is in
* WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
* that modification of the page is safe. If write_ok is false then we
* will not return the page until it is not undergoing active I/O.
*
* The passed-in xid is used only for error reporting, and may be
* InvalidTransactionId if no specific xid is associated with the action.
*
* Return value is the shared-buffer slot number now holding the page.
* The buffer's LRU access info is updated.
*
* Control lock must be held at entry, and will be held at exit.
*/
int
SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
TransactionId xid)
{
SlruShared shared = ctl->shared;
/* Outer loop handles restart if we must wait for someone else's I/O */
for (;;)
{
int slotno;
bool ok;
/* See if page already is in memory; if not, pick victim slot */
slotno = SlruSelectLRUPage(ctl, pageno);
/* Did we find the page in memory? */
if (shared->page_number[slotno] == pageno &&
shared->page_status[slotno] != SLRU_PAGE_EMPTY)
{
/*
* If page is still being read in, we must wait for I/O. Likewise
* if the page is being written and the caller said that's not OK.
*/
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
(shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
!write_ok))
{
SimpleLruWaitIO(ctl, slotno);
/* Now we must recheck state from the top */
continue;
}
/* Otherwise, it's ready to use */
SlruRecentlyUsed(shared, slotno);
return slotno;
}
/* We found no match; assert we selected a freeable slot */
Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
(shared->page_status[slotno] == SLRU_PAGE_VALID &&
!shared->page_dirty[slotno]));
/* Mark the slot read-busy */
shared->page_number[slotno] = pageno;
shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
/* Do the read */
ok = SlruPhysicalReadPage(ctl, pageno, slotno);
/* Set the LSNs for this newly read-in page to zero */
SimpleLruZeroLSNs(ctl, slotno);
/* Re-acquire control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
!shared->page_dirty[slotno]);
shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok)
SlruReportIOError(ctl, pageno, xid);
SlruRecentlyUsed(shared, slotno);
return slotno;
}
}
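Note the contract in the header comment: the control lock is held across the call, and the returned slot number is only meaningful while that lock remains held. A hedged sketch of a caller, loosely modeled on how the clog code uses this API; the helper and the one-byte read are illustrative, not actual PostgreSQL functions:

/* Hypothetical caller: fetch one byte from an SLRU page. */
static char
my_read_byte(SlruCtl ctl, int pageno, int byteno, TransactionId xid)
{
    int         slotno;
    char        value;

    LWLockAcquire(ctl->shared->ControlLock, LW_EXCLUSIVE);
    slotno = SimpleLruReadPage(ctl, pageno, true, xid);
    value = ctl->shared->page_buffer[slotno][byteno];
    LWLockRelease(ctl->shared->ControlLock);

    return value;
}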
Example 2: SIInsertDataEntries
/*
* SIInsertDataEntries
* Add new invalidation message(s) to the buffer.
*/
void
SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
{
SISeg *segP = shmInvalBuffer;
/*
* N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
* trying to do SICleanupQueue to pass on its signal, and we don't want it
* to have to wait a long time.) Also, we need to consider calling
* SICleanupQueue every so often.
*/
while (n > 0)
{
int nthistime = Min(n, WRITE_QUANTUM);
int numMsgs;
int max;
n -= nthistime;
LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
/*
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
* fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
{
numMsgs = segP->maxMsgNum - segP->minMsgNum;
if (numMsgs + nthistime > MAXNUMMESSAGES ||
numMsgs >= segP->nextThreshold)
SICleanupQueue(true, nthistime);
else
break;
}
/*
* Insert new message(s) into proper slot of circular buffer
*/
max = segP->maxMsgNum;
while (nthistime-- > 0)
{
segP->buffer[max % MAXNUMMESSAGES] = *data++;
max++;
}
/* Update current value of maxMsgNum using spinlock */
{
/* use volatile pointer to prevent code rearrangement */
volatile SISeg *vsegP = segP;
SpinLockAcquire(&vsegP->msgnumLock);
vsegP->maxMsgNum = max;
SpinLockRelease(&vsegP->msgnumLock);
}
LWLockRelease(SInvalWriteLock);
}
}
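The spinlock around maxMsgNum exists because readers run under SInvalReadLock while this writer holds only SInvalWriteLock, so the two locks do not exclude each other. A hedged sketch of the matching reader side (the helper is hypothetical; the real consumer of this protocol is SIGetDataEntries):

/* Hypothetical reader: fetch the current maxMsgNum safely. */
static int
my_peek_max_msgnum(SISeg *segP)
{
    int         max;

    LWLockAcquire(SInvalReadLock, LW_SHARED);
    {
        /* use volatile pointer to prevent code rearrangement */
        volatile SISeg *vsegP = segP;

        SpinLockAcquire(&vsegP->msgnumLock);
        max = vsegP->maxMsgNum;
        SpinLockRelease(&vsegP->msgnumLock);
    }
    LWLockRelease(SInvalReadLock);

    return max;
}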
Example 3: SICleanupQueue
//......... part of the code omitted here .........
/* Lock out all writers and readers */
if (!callerHasWriteLock)
LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
LWLockAcquire(SInvalReadLock, LW_EXCLUSIVE);
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
* backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/
min = segP->maxMsgNum;
minsig = min - SIG_THRESHOLD;
lowbound = min - MAXNUMMESSAGES + minFree;
for (i = 0; i < segP->lastBackend; i++)
{
ProcState *stateP = &segP->procState[i];
int n = stateP->nextMsgNum;
/* Ignore if inactive or already in reset state */
if (stateP->procPid == 0 || stateP->resetState || stateP->sendOnly)
continue;
/*
* If we must free some space and this backend is preventing it, force
* him into reset state and then ignore until he catches up.
*/
if (n < lowbound)
{
stateP->resetState = true;
/* no point in signaling him ... */
continue;
}
/* Track the global minimum nextMsgNum */
if (n < min)
min = n;
/* Also see who's furthest back of the unsignaled backends */
if (n < minsig && !stateP->signaled)
{
minsig = n;
needSig = stateP;
}
}
segP->minMsgNum = min;
/*
* When minMsgNum gets really large, decrement all message counters so as
* to forestall overflow of the counters. This happens seldom enough that
* folding it into the previous loop would be a loser.
*/
if (min >= MSGNUMWRAPAROUND)
{
segP->minMsgNum -= MSGNUMWRAPAROUND;
segP->maxMsgNum -= MSGNUMWRAPAROUND;
for (i = 0; i < segP->lastBackend; i++)
{
/* we don't bother skipping inactive entries here */
segP->procState[i].nextMsgNum -= MSGNUMWRAPAROUND;
}
}
/*
* Determine how many messages are still in the queue, and set the
* threshold at which we should repeat SICleanupQueue().
*/
numMsgs = segP->maxMsgNum - segP->minMsgNum;
if (numMsgs < CLEANUP_MIN)
segP->nextThreshold = CLEANUP_MIN;
else
segP->nextThreshold = (numMsgs / CLEANUP_QUANTUM + 1) * CLEANUP_QUANTUM;
/*
* Lastly, signal anyone who needs a catchup interrupt. Since
* SendProcSignal() might not be fast, we don't want to hold locks while
* executing it.
*/
if (needSig)
{
pid_t his_pid = needSig->procPid;
BackendId his_backendId = (needSig - &segP->procState[0]) + 1;
needSig->signaled = true;
LWLockRelease(SInvalReadLock);
LWLockRelease(SInvalWriteLock);
elog(DEBUG4, "sending sinval catchup signal to PID %d", (int) his_pid);
SendProcSignal(his_pid, PROCSIG_CATCHUP_INTERRUPT, his_backendId);
if (callerHasWriteLock)
LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
}
else
{
LWLockRelease(SInvalReadLock);
if (!callerHasWriteLock)
LWLockRelease(SInvalWriteLock);
}
}
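The tail of this function shows a recurring rule around LWLockRelease: copy whatever you need out of shared memory, release the locks, and only then make the potentially slow call (SendProcSignal here). A generic hedged sketch of that shape, with hypothetical names throughout:

#include "postgres.h"
#include "storage/lwlock.h"
#include <signal.h>

typedef struct MyBackendSlot
{
    pid_t       pid;
    bool        signaled;
} MyBackendSlot;

/* Hypothetical: signal a backend without holding the lock across kill(). */
static void
my_signal_backend(MyBackendSlot *slot, LWLock *lock)
{
    pid_t       target = 0;

    LWLockAcquire(lock, LW_EXCLUSIVE);
    if (!slot->signaled)
    {
        slot->signaled = true;      /* mark while still holding the lock */
        target = slot->pid;
    }
    LWLockRelease(lock);

    if (target != 0)
        kill(target, SIGUSR1);      /* the slow part runs lock-free */
}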
Example 4: commit_ts_redo
/*
* CommitTS resource manager's routines
*/
void
commit_ts_redo(XLogReaderState *record)
{
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
/* Backup blocks are not used in commit_ts records */
Assert(!XLogRecHasAnyBlockRefs(record));
if (info == COMMIT_TS_ZEROPAGE)
{
int pageno;
int slotno;
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
slotno = ZeroCommitTsPage(pageno, false);
SimpleLruWritePage(CommitTsCtl, slotno);
Assert(!CommitTsCtl->shared->page_dirty[slotno]);
LWLockRelease(CommitTsControlLock);
}
else if (info == COMMIT_TS_TRUNCATE)
{
int pageno;
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
/*
* During XLOG replay, latest_page_number isn't set up yet; insert a
* suitable value to bypass the sanity test in SimpleLruTruncate.
*/
CommitTsCtl->shared->latest_page_number = pageno;
SimpleLruTruncate(CommitTsCtl, pageno);
}
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *setts = (xl_commit_ts_set *) XLogRecGetData(record);
int nsubxids;
TransactionId *subxids;
nsubxids = ((XLogRecGetDataLen(record) - SizeOfCommitTsSet) /
sizeof(TransactionId));
if (nsubxids > 0)
{
subxids = palloc(sizeof(TransactionId) * nsubxids);
memcpy(subxids,
XLogRecGetData(record) + SizeOfCommitTsSet,
sizeof(TransactionId) * nsubxids);
}
else
subxids = NULL;
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
setts->timestamp, setts->nodeid, true);
if (subxids)
pfree(subxids);
}
else
elog(PANIC, "commit_ts_redo: unknown op code %u", info);
}
Example 5: task_tracker_assign_task
/*
* task_tracker_assign_task creates a new task in the shared hash or updates an
* already existing task. The function also creates a schema for the job if it
* doesn't already exist.
*/
Datum
task_tracker_assign_task(PG_FUNCTION_ARGS)
{
uint64 jobId = PG_GETARG_INT64(0);
uint32 taskId = PG_GETARG_UINT32(1);
text *taskCallStringText = PG_GETARG_TEXT_P(2);
StringInfo jobSchemaName = JobSchemaName(jobId);
bool schemaExists = false;
WorkerTask *workerTask = NULL;
char *taskCallString = text_to_cstring(taskCallStringText);
uint32 taskCallStringLength = strlen(taskCallString);
/* check that we have a running task tracker on this host */
bool taskTrackerRunning = TaskTrackerRunning();
if (!taskTrackerRunning)
{
ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW),
errmsg("the task tracker has been disabled or shut down")));
}
/* check that we have enough space in our shared hash for this string */
if (taskCallStringLength >= TASK_CALL_STRING_SIZE)
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("task call string exceeds maximum assignable length")));
}
/*
* If the schema does not exist, we create it. However, the schema does not
* become visible to other processes until the transaction commits, and we
* therefore do not release the resource lock in this case. Otherwise, the
* schema is already visible, and we immediately release the resource lock.
*/
LockJobResource(jobId, AccessExclusiveLock);
schemaExists = JobSchemaExists(jobSchemaName);
if (!schemaExists)
{
/* lock gets automatically released upon return from this function */
CreateJobSchema(jobSchemaName);
}
else
{
UnlockJobResource(jobId, AccessExclusiveLock);
}
LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE);
/* check if we already have the task in our shared hash */
workerTask = WorkerTasksHashFind(jobId, taskId);
if (workerTask == NULL)
{
CreateTask(jobId, taskId, taskCallString);
}
else
{
UpdateTask(workerTask, taskCallString);
}
LWLockRelease(&WorkerTasksSharedState->taskHashLock);
PG_RETURN_VOID();
}
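Because a shared hash table is not safe for concurrent hash_search calls, lookups must take the same taskHashLock, though shared mode suffices for them. A hedged reader-side sketch (the helper is hypothetical, built from the same calls the function above uses):

/* Hypothetical reader: check for a task under the shared lock. */
static bool
my_task_exists(uint64 jobId, uint32 taskId)
{
    WorkerTask *workerTask;
    bool        exists;

    LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED);
    workerTask = WorkerTasksHashFind(jobId, taskId);
    exists = (workerTask != NULL);
    LWLockRelease(&WorkerTasksSharedState->taskHashLock);

    return exists;
}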
Example 6: pgss_shmem_startup
/*
* shmem_startup hook: allocate or attach to shared memory,
* then load any pre-existing statistics from file.
*/
static void
pgss_shmem_startup(void)
{
bool found;
HASHCTL info;
FILE *file;
uint32 header;
int32 num;
int32 i;
int query_size;
int buffer_size;
char *buffer = NULL;
if (prev_shmem_startup_hook)
prev_shmem_startup_hook();
/* reset in case this is a restart within the postmaster */
pgss = NULL;
pgss_hash = NULL;
/*
* Create or attach to the shared memory state, including hash table
*/
LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
pgss = ShmemInitStruct("pg_stat_statements",
sizeof(pgssSharedState),
&found);
if (!found)
{
/* First time through ... */
pgss->lock = LWLockAssign();
pgss->query_size = pgstat_track_activity_query_size;
}
/* Be sure everyone agrees on the hash table entry size */
query_size = pgss->query_size;
memset(&info, 0, sizeof(info));
info.keysize = sizeof(pgssHashKey);
info.entrysize = offsetof(pgssEntry, query) + query_size;
info.hash = pgss_hash_fn;
info.match = pgss_match_fn;
pgss_hash = ShmemInitHash("pg_stat_statements hash",
pgss_max, pgss_max,
&info,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
LWLockRelease(AddinShmemInitLock);
/*
* If we're in the postmaster (or a standalone backend...), set up a shmem
* exit hook to dump the statistics to disk.
*/
if (!IsUnderPostmaster)
on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
/*
* Attempt to load old statistics from the dump file, if this is the first
* time through and we weren't told not to.
*/
if (found || !pgss_save)
return;
/*
* Note: we don't bother with locks here, because there should be no other
* processes running when this code is reached.
*/
file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
if (file == NULL)
{
if (errno == ENOENT)
return; /* ignore not-found error */
goto error;
}
buffer_size = query_size;
buffer = (char *) palloc(buffer_size);
if (fread(&header, sizeof(uint32), 1, file) != 1 ||
header != PGSS_FILE_HEADER ||
fread(&num, sizeof(int32), 1, file) != 1)
goto error;
for (i = 0; i < num; i++)
{
pgssEntry temp;
pgssEntry *entry;
if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
goto error;
/* Encoding is the only field we can easily sanity-check */
if (!PG_VALID_BE_ENCODING(temp.key.encoding))
goto error;
//......... part of the code omitted here .........
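For context, pgss_shmem_startup only runs because _PG_init chained it into shmem_startup_hook. A hedged sketch of that registration, using the pre-9.6 APIs this example already relies on (LWLockAssign implies a matching RequestAddinLWLocks); the sizes are simplified:

static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

void
_PG_init(void)
{
    /* Simplified: the real module also requests hash-table space. */
    RequestAddinShmemSpace(sizeof(pgssSharedState));
    RequestAddinLWLocks(1);

    /* Chain into the startup hook, preserving any previous hook. */
    prev_shmem_startup_hook = shmem_startup_hook;
    shmem_startup_hook = pgss_shmem_startup;
}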
Example 7: pg_stat_statements
//......... part of the code omitted here .........
Oid userid = GetUserId();
bool is_superuser = superuser();
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("set-valued function called in context that cannot accept a set")));
if (!(rsinfo->allowedModes & SFRM_Materialize))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("materialize mode required, but it is not " \
"allowed in this context")));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
oldcontext = MemoryContextSwitchTo(per_query_ctx);
tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->returnMode = SFRM_Materialize;
rsinfo->setResult = tupstore;
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);
LWLockAcquire(pgss->lock, LW_SHARED);
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
{
Datum values[PG_STAT_STATEMENTS_COLS];
bool nulls[PG_STAT_STATEMENTS_COLS];
int i = 0;
Counters tmp;
memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls));
values[i++] = ObjectIdGetDatum(entry->key.userid);
values[i++] = ObjectIdGetDatum(entry->key.dbid);
if (is_superuser || entry->key.userid == userid)
{
char *qstr;
qstr = (char *)
pg_do_encoding_conversion((unsigned char *) entry->query,
entry->key.query_len,
entry->key.encoding,
GetDatabaseEncoding());
values[i++] = CStringGetTextDatum(qstr);
if (qstr != entry->query)
pfree(qstr);
}
else
values[i++] = CStringGetTextDatum("<insufficient privilege>");
/* copy counters to a local variable to keep locking time short */
{
volatile pgssEntry *e = (volatile pgssEntry *) entry;
SpinLockAcquire(&e->mutex);
tmp = e->counters;
SpinLockRelease(&e->mutex);
}
values[i++] = Int64GetDatumFast(tmp.calls);
values[i++] = Float8GetDatumFast(tmp.total_time);
values[i++] = Int64GetDatumFast(tmp.rows);
values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
values[i++] = Int64GetDatumFast(tmp.local_blks_read);
values[i++] = Int64GetDatumFast(tmp.local_blks_written);
values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
Assert(i == PG_STAT_STATEMENTS_COLS);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
}
LWLockRelease(pgss->lock);
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
return (Datum) 0;
}
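The volatile-copy block above is the read half of the per-entry spinlock idiom; the write half lives in the executor hook, which updates counters while holding pgss->lock only in shared mode. A hedged sketch of such an update (the helper is hypothetical; the field names follow the Counters fields read above):

/* Hypothetical writer: bump an entry's counters under its spinlock. */
static void
my_bump_counters(pgssEntry *entry, double total_time, int64 rows)
{
    volatile pgssEntry *e = (volatile pgssEntry *) entry;

    SpinLockAcquire(&e->mutex);
    e->counters.calls += 1;
    e->counters.total_time += total_time;
    e->counters.rows += rows;
    SpinLockRelease(&e->mutex);
}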
Example 8: TablespaceCreateDbspace
//......... part of the code omitted here .........
if (spcNode == GLOBALTABLESPACE_OID)
return;
Assert(OidIsValid(spcNode));
Assert(OidIsValid(dbNode));
dir = GetDatabasePath(dbNode, spcNode);
if (stat(dir, &st) < 0)
{
/* Directory does not exist? */
if (errno == ENOENT)
{
/*
* Acquire TablespaceCreateLock to ensure that no DROP TABLESPACE
* or TablespaceCreateDbspace is running concurrently.
*/
LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);
/*
* Recheck to see if someone created the directory while we were
* waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
/* Directory was created */
}
else
{
/* Directory creation failed? */
if (mkdir(dir, S_IRWXU) < 0)
{
char *parentdir;
/* Failure other than not exists or not in WAL replay? */
if (errno != ENOENT || !isRedo)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
dir)));
/*
* Parent directories are missing during WAL replay, so
* continue by creating simple parent directories rather
* than a symlink.
*/
/* create the directory two levels up if it doesn't exist */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
get_parent_directory(parentdir);
/* Can't create parent and it doesn't already exist? */
if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
parentdir)));
pfree(parentdir);
/* create the directory one level up if it doesn't exist */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
/* Can't create parent and it doesn't already exist? */
if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
parentdir)));
pfree(parentdir);
/* Create database directory */
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
dir)));
}
}
LWLockRelease(TablespaceCreateLock);
}
else
{
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not stat directory \"%s\": %m", dir)));
}
}
else
{
/* Is it not a directory? */
if (!S_ISDIR(st.st_mode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" exists but is not a directory",
dir)));
}
pfree(dir);
}
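The stat/lock/re-stat sequence above is the classic check-lock-recheck idiom: the cheap test runs unlocked, then is repeated under the lock before acting. A condensed hedged sketch of just that skeleton (the helper is hypothetical and the error handling is trimmed to one case):

#include <sys/stat.h>
#include <errno.h>

/* Hypothetical: create a directory exactly once across backends. */
static void
my_create_dir_once(const char *dir)
{
    struct stat st;

    if (stat(dir, &st) == 0)
        return;                     /* fast path, no lock needed */

    LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);

    /* Recheck: someone may have created it while we waited. */
    if (stat(dir, &st) < 0 &&
        mkdir(dir, S_IRWXU) < 0 && errno != EEXIST)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not create directory \"%s\": %m", dir)));

    LWLockRelease(TablespaceCreateLock);
}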
Example 9: smgrDoAppendOnlyResyncEofs
void
smgrDoAppendOnlyResyncEofs(bool forCommit)
{
HASH_SEQ_STATUS iterateStatus;
AppendOnlyMirrorResyncEofs *entry;
AppendOnlyMirrorResyncEofs *entryExample = NULL;
int appendOnlyMirrorResyncEofsCount;
if (AppendOnlyMirrorResyncEofsTable == NULL)
return;
if (Debug_persistent_print ||
Debug_persistent_appendonly_commit_count_print)
elog(Persistent_DebugPrintLevel(),
"Storage Manager: Enter Append-Only mirror resync eofs list entries (Append-Only commit work count %d)",
FileRepPrimary_GetAppendOnlyCommitWorkCount());
hash_seq_init(
&iterateStatus,
AppendOnlyMirrorResyncEofsTable);
appendOnlyMirrorResyncEofsCount = 0;
while ((entry = hash_seq_search(&iterateStatus)) != NULL)
{
if (entryExample == NULL)
{
entryExample = entry;
}
if (forCommit)
{
PersistentFileSysObj_UpdateAppendOnlyMirrorResyncEofs(&entry->key.relFileNode,
entry->key.segmentFileNum,
&entry->persistentTid,
entry->persistentSerialNum,
entry->mirrorCatchupRequired,
entry->mirrorNewEof,
/* recovery */ false,
/* flushToXLog */ false);
}
else
{
/*
* Abort case.
*/
if (entry->didIncrementCommitCount)
{
int systemAppendOnlyCommitWorkCount;
LWLockAcquire(FileRepAppendOnlyCommitCountLock, LW_EXCLUSIVE);
systemAppendOnlyCommitWorkCount =
FileRepPrimary_FinishedAppendOnlyCommitWork(1);
if (entry->isDistributedTransaction)
{
PrepareDecrAppendOnlyCommitWork(entry->gid);
}
if (Debug_persistent_print ||
Debug_persistent_appendonly_commit_count_print)
elog(Persistent_DebugPrintLevel(),
"Storage Manager: Append-Only Mirror Resync EOFs decrementing commit work for aborted transaction "
"(system count %d). "
"Relation %u/%u/%u, segment file #%d (persistent serial num " INT64_FORMAT ", TID %s) ",
systemAppendOnlyCommitWorkCount,
entry->key.relFileNode.spcNode,
entry->key.relFileNode.dbNode,
entry->key.relFileNode.relNode,
entry->key.segmentFileNum,
entry->persistentSerialNum,
ItemPointerToString(&entry->persistentTid));
pendingAppendOnlyMirrorResyncIntentCount--;
LWLockRelease(FileRepAppendOnlyCommitCountLock);
}
}
if (Debug_persistent_print ||
Debug_persistent_appendonly_commit_count_print)
elog(Persistent_DebugPrintLevel(),
"Storage Manager: Append-Only mirror resync eofs list entry #%d: %u/%u/%u, segment file #%d, relation name '%s' "
"(forCommit %s, persistent TID %s, persistent serial number " INT64_FORMAT ", mirror catchup required %s, mirror new EOF " INT64_FORMAT ")",
appendOnlyMirrorResyncEofsCount,
entry->key.relFileNode.spcNode,
entry->key.relFileNode.dbNode,
entry->key.relFileNode.relNode,
entry->key.segmentFileNum,
(entry->relationName == NULL ? "<null>" : entry->relationName),
(forCommit ? "true" : "false"),
ItemPointerToString(&entry->persistentTid),
entry->persistentSerialNum,
(entry->mirrorCatchupRequired ? "true" : "false"),
entry->mirrorNewEof);
appendOnlyMirrorResyncEofsCount++;
}
//......... part of the code omitted here .........
Example 10: SyncRepWaitForLSN
/*
* Wait for synchronous replication, if requested by user.
*
* Initially backends start in state SYNC_REP_NOT_WAITING and then
* change that state to SYNC_REP_WAITING before adding ourselves
* to the wait queue. During SyncRepWakeQueue() a WALSender changes
* the state to SYNC_REP_WAIT_COMPLETE once replication is confirmed.
* This backend then resets its state to SYNC_REP_NOT_WAITING.
*/
void
SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
{
char *new_status = NULL;
const char *old_status;
/*
* Fast exit if user has not requested sync replication, or
* there are no sync replication standby names defined.
* Note that those standbys don't need to be connected.
*/
if (!SyncRepRequested() || !SyncStandbysDefined())
return;
Assert(SHMQueueIsDetached(&(MyProc->syncRepLinks)));
Assert(WalSndCtl != NULL);
/* Reset the latch before adding ourselves to the queue. */
ResetLatch(&MyProc->waitLatch);
/*
* Set our waitLSN so WALSender will know when to wake us, and add
* ourselves to the queue.
*/
LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
Assert(MyProc->syncRepState == SYNC_REP_NOT_WAITING);
if (!WalSndCtl->sync_standbys_defined)
{
/*
* We don't wait for sync rep if WalSndCtl->sync_standbys_defined is
* not set. See SyncRepUpdateSyncStandbysDefined.
*/
LWLockRelease(SyncRepLock);
return;
}
MyProc->waitLSN = XactCommitLSN;
MyProc->syncRepState = SYNC_REP_WAITING;
SyncRepQueueInsert();
Assert(SyncRepQueueIsOrderedByLSN());
LWLockRelease(SyncRepLock);
/* Alter ps display to show waiting for sync rep. */
if (update_process_title)
{
int len;
old_status = get_ps_display(&len);
new_status = (char *) palloc(len + 32 + 1);
memcpy(new_status, old_status, len);
sprintf(new_status + len, " waiting for %X/%X",
XactCommitLSN.xlogid, XactCommitLSN.xrecoff);
set_ps_display(new_status, false);
new_status[len] = '\0'; /* truncate off " waiting ..." */
}
/*
* Wait for specified LSN to be confirmed.
*
* Each proc has its own wait latch, so we perform a normal latch
* check/wait loop here.
*/
for (;;)
{
int syncRepState;
/*
* Wait on latch for up to 60 seconds. This allows us to
* check for postmaster death regularly while waiting.
* Note that timeout here does not necessarily release from loop.
*/
WaitLatch(&MyProc->waitLatch, 60000000L);
/* Must reset the latch before testing state. */
ResetLatch(&MyProc->waitLatch);
/*
* Try checking the state without the lock first. There's no guarantee
* that we'll read the most up-to-date value, so if it looks like we're
* still waiting, recheck while holding the lock. But if it looks like
* we're done, we must really be done, because once walsender changes
* the state to SYNC_REP_WAIT_COMPLETE, it will never update it again,
* so we can't be seeing a stale value in that case.
*/
syncRepState = MyProc->syncRepState;
if (syncRepState == SYNC_REP_WAITING)
{
LWLockAcquire(SyncRepLock, LW_SHARED);
syncRepState = MyProc->syncRepState;
LWLockRelease(SyncRepLock);
}
if (syncRepState == SYNC_REP_WAIT_COMPLETE)
//......... part of the code omitted here .........
Example 11: dbms_alert_defered_signal
Datum
dbms_alert_defered_signal(PG_FUNCTION_ARGS)
{
TriggerData *trigdata = (TriggerData *) fcinfo->context;
TupleDesc tupdesc;
HeapTuple rettuple;
char *relname;
text *name;
text *message;
int event_col;
int message_col;
Datum datum;
bool isnull;
int cycle = 0;
float8 endtime;
float8 timeout = 2;
if (!CALLED_AS_TRIGGER(fcinfo))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("not called by trigger manager")));
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("not called on valid event")));
if (SPI_connect() < 0)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("SPI_connect failed")));
if (strcmp((relname = SPI_getrelname(trigdata->tg_relation)), "ora_alerts") != 0)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("not called with valid relation")));
rettuple = trigdata->tg_trigtuple;
tupdesc = trigdata->tg_relation->rd_att;
if (SPI_ERROR_NOATTRIBUTE == (event_col = SPI_fnumber(tupdesc, "event")))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("attribute event not found")));
if (SPI_ERROR_NOATTRIBUTE == (message_col = SPI_fnumber(tupdesc, "message")))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("attribute message not found")));
datum = SPI_getbinval(rettuple, tupdesc, event_col, &isnull);
if (isnull)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("event name is NULL"),
errdetail("Eventname may not be NULL.")));
name = DatumGetTextP(datum);
datum = SPI_getbinval(rettuple, tupdesc, message_col, &isnull);
if (isnull)
message = NULL;
else
message = DatumGetTextP(datum);
WATCH_PRE(timeout, endtime, cycle);
if (ora_lock_shmem(SHMEMMSGSZ, MAX_PIPES, MAX_EVENTS, MAX_LOCKS, false))
{
ItemPointer tid;
Oid argtypes[1] = {TIDOID};
char nulls[1] = {' '};
Datum values[1];
void *plan;
create_message(name, message);
LWLockRelease(shmem_lock);
tid = &rettuple->t_data->t_ctid;
if (!(plan = SPI_prepare("DELETE FROM ora_alerts WHERE ctid = $1", 1, argtypes)))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("SPI_prepare failed")));
values[0] = ItemPointerGetDatum(tid);
if (SPI_OK_DELETE != SPI_execute_plan(plan, values, nulls, false, 1))
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION),
errmsg("can't execute sql")));
SPI_finish();
return PointerGetDatum(rettuple);
}
WATCH_POST(timeout, endtime, cycle);
LOCK_ERROR();
PG_RETURN_NULL();
}
Example 12: SyncRepReleaseWaiters
/*
* Update the LSNs on each queue based upon our latest state. This
* implements a simple policy of first-valid-standby-releases-waiter.
*
* Other policies are possible, which would change what we do here and what
* perhaps also which information we store as well.
*/
void
SyncRepReleaseWaiters(void)
{
volatile WalSndCtlData *walsndctl = WalSndCtl;
volatile WalSnd *syncWalSnd = NULL;
int numprocs = 0;
int priority = 0;
int i;
/*
* If this WALSender is serving a standby that is not on the list of
* potential standbys then we have nothing to do. If we are still
* starting up or still running base backup, then leave quickly also.
*/
if (MyWalSnd->sync_standby_priority == 0 ||
MyWalSnd->state < WALSNDSTATE_STREAMING)
return;
/*
* We're a potential sync standby. Release waiters if we are the
* highest-priority standby. If multiple standbys share the same
* priority, the first one listed is used.
* If you change this, also change pg_stat_get_wal_senders().
*/
LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
for (i = 0; i < max_wal_senders; i++)
{
/* use volatile pointer to prevent code rearrangement */
volatile WalSnd *walsnd = &walsndctl->walsnds[i];
if (walsnd->pid != 0 &&
walsnd->sync_standby_priority > 0 &&
(priority == 0 ||
priority > walsnd->sync_standby_priority))
{
priority = walsnd->sync_standby_priority;
syncWalSnd = walsnd;
}
}
/*
* We should have found ourselves at least.
*/
Assert(syncWalSnd);
/*
* If we aren't managing the highest priority standby then just leave.
*/
if (syncWalSnd != MyWalSnd)
{
LWLockRelease(SyncRepLock);
announce_next_takeover = true;
return;
}
if (XLByteLT(walsndctl->lsn, MyWalSnd->flush))
{
/*
* Set the lsn first so that when we wake backends they will
* release up to this location.
*/
walsndctl->lsn = MyWalSnd->flush;
numprocs = SyncRepWakeQueue(false);
}
LWLockRelease(SyncRepLock);
elog(DEBUG3, "released %d procs up to %X/%X",
numprocs,
MyWalSnd->flush.xlogid,
MyWalSnd->flush.xrecoff);
/*
* If we are managing the highest priority standby, though we weren't
* prior to this, then announce we are now the sync standby.
*/
if (announce_next_takeover)
{
announce_next_takeover = false;
ereport(LOG,
(errmsg("standby \"%s\" is now the synchronous standby with priority %u",
application_name, MyWalSnd->sync_standby_priority)));
}
}
Example 13: ShmemInitStruct
/*
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
* a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
*
* Returns: pointer to the object. *foundPtr is set TRUE if the object was
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
* cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
ShmemInitStruct(const char *name, Size size, bool *foundPtr)
{
ShmemIndexEnt *result;
void *structPtr;
LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
if (!ShmemIndex)
{
PGShmemHeader *shmemseghdr = ShmemSegHdr;
/* Must be trying to create/attach to ShmemIndex itself */
Assert(strcmp(name, "ShmemIndex") == 0);
if (IsUnderPostmaster)
{
/* Must be initializing a (non-standalone) backend */
Assert(shmemseghdr->index != NULL);
structPtr = shmemseghdr->index;
*foundPtr = TRUE;
}
else
{
/*
* If the shmem index doesn't exist, we are bootstrapping: we must
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
* index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
structPtr = ShmemAlloc(size);
shmemseghdr->index = structPtr;
*foundPtr = FALSE;
}
LWLockRelease(ShmemIndexLock);
return structPtr;
}
/* look it up in the shmem index */
result = (ShmemIndexEnt *)
hash_search(ShmemIndex, name, HASH_ENTER_NULL, foundPtr);
if (!result)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
}
if (*foundPtr)
{
/*
* Structure is in the shmem index so someone else has allocated it
* already. The size better be the same as the size we are trying to
* initialize to, or there is a name conflict (or worse).
*/
if (result->size != size)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %zu, actual %zu",
name, size, result->size)));
}
structPtr = result->location;
}
else
{
/* It isn't in the table yet; allocate and initialize it. */
structPtr = ShmemAllocNoError(size);
if (structPtr == NULL)
{
/* out of memory; remove the failed ShmemIndex entry */
hash_search(ShmemIndex, name, HASH_REMOVE, NULL);
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough shared memory for data structure"
" \"%s\" (%zu bytes requested)",
//......... part of the code omitted here .........
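The usual caller pattern, visible in pgss_shmem_startup above, is: take AddinShmemInitLock, call ShmemInitStruct, and initialize the contents only when *foundPtr comes back false. A hedged sketch, reusing the hypothetical MySharedState from the introductory example:

static MySharedState *my_state = NULL;

/* Hypothetical extension startup: attach to or create a shared struct. */
static void
my_shmem_startup(void)
{
    bool        found;

    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
    my_state = ShmemInitStruct("my_extension state",
                               sizeof(MySharedState), &found);
    if (!found)
        my_state->counter = 0;      /* first time through: initialize */
    LWLockRelease(AddinShmemInitLock);
}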
Example 14: SlruInternalWritePage
/*
* Write a page from a shared buffer, if necessary.
* Does nothing if the specified slot is not dirty.
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
* the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
*/
static void
SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
{
SlruShared shared = ctl->shared;
int pageno = shared->page_number[slotno];
bool ok;
/* If a write is in progress, wait for it to finish */
while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
shared->page_number[slotno] == pageno)
{
SimpleLruWaitIO(ctl, slotno);
}
/*
* Do nothing if page is not dirty, or if buffer no longer contains the
* same page we were called for.
*/
if (!shared->page_dirty[slotno] ||
shared->page_status[slotno] != SLRU_PAGE_VALID ||
shared->page_number[slotno] != pageno)
return;
/*
* Mark the slot write-busy, and clear the dirtybit. After this point, a
* transaction status update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
/* Do the write */
ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
/* If we failed, and we're in a flush, better close the files */
if (!ok && fdata)
{
int i;
for (i = 0; i < fdata->num_files; i++)
close(fdata->fd[i]);
}
/* Re-acquire control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
/* If we failed to write, mark the page dirty again */
if (!ok)
shared->page_dirty[slotno] = true;
shared->page_status[slotno] = SLRU_PAGE_VALID;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok)
SlruReportIOError(ctl, pageno, InvalidTransactionId);
}
Example 15: pg_stat_get_wal_senders
/*
* Returns activity of walsenders, including pids and xlog locations sent to
* standby servers.
*/
Datum
pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_WAL_SENDERS_COLS 8
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
int *sync_priority;
int priority = 0;
int sync_standby = -1;
int i;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("set-valued function called in context that cannot accept a set")));
if (!(rsinfo->allowedModes & SFRM_Materialize))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("materialize mode required, but it is not " \
"allowed in this context")));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
oldcontext = MemoryContextSwitchTo(per_query_ctx);
tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->returnMode = SFRM_Materialize;
rsinfo->setResult = tupstore;
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);
/*
* Get the priorities of sync standbys all in one go, to minimise lock
* acquisitions and to allow us to evaluate who is the current sync
* standby. This code must match the code in SyncRepReleaseWaiters().
*/
sync_priority = palloc(sizeof(int) * max_wal_senders);
LWLockAcquire(SyncRepLock, LW_SHARED);
for (i = 0; i < max_wal_senders; i++)
{
/* use volatile pointer to prevent code rearrangement */
volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
if (walsnd->pid != 0)
{
sync_priority[i] = walsnd->sync_standby_priority;
if (walsnd->state == WALSNDSTATE_STREAMING &&
walsnd->sync_standby_priority > 0 &&
(priority == 0 ||
priority > walsnd->sync_standby_priority))
{
priority = walsnd->sync_standby_priority;
sync_standby = i;
}
}
}
LWLockRelease(SyncRepLock);
for (i = 0; i < max_wal_senders; i++)
{
/* use volatile pointer to prevent code rearrangement */
volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
char location[MAXFNAMELEN];
XLogRecPtr sentPtr;
XLogRecPtr write;
XLogRecPtr flush;
XLogRecPtr apply;
WalSndState state;
Datum values[PG_STAT_GET_WAL_SENDERS_COLS];
bool nulls[PG_STAT_GET_WAL_SENDERS_COLS];
if (walsnd->pid == 0)
continue;
SpinLockAcquire(&walsnd->mutex);
sentPtr = walsnd->sentPtr;
state = walsnd->state;
write = walsnd->write;
flush = walsnd->flush;
apply = walsnd->apply;
SpinLockRelease(&walsnd->mutex);
memset(nulls, 0, sizeof(nulls));
values[0] = Int32GetDatum(walsnd->pid);
if (!superuser())
{
//......... part of the code omitted here .........