This article collects typical usage examples of the LWLockAcquire function in C++, all drawn from PostgreSQL and PostgreSQL-derived code bases (such as Greenplum). If you have been wondering what exactly LWLockAcquire does, how it is used, or what real-world calls look like, the curated examples below should help.
The following presents 15 code examples of LWLockAcquire, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps surface better C++ code examples.
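Before the individual examples, here is a minimal sketch of the pattern nearly all of them follow: acquire the lightweight lock protecting a piece of shared memory in LW_SHARED mode for read-only access (multiple readers may hold it at once) or LW_EXCLUSIVE mode for updates, do the work, and release the lock promptly. The lock name (MyCountersLock), the struct, and both helper functions are hypothetical and only illustrate the discipline; they are not taken from any of the examples below.
typedef struct MyCounters
{
	uint64		reads;
	uint64		writes;
} MyCounters;

static MyCounters *sharedCounters;	/* points into shared memory */

static uint64
get_read_count(void)
{
	uint64		n;

	/* readers can share the lock */
	LWLockAcquire(MyCountersLock, LW_SHARED);
	n = sharedCounters->reads;
	LWLockRelease(MyCountersLock);

	return n;
}

static void
bump_write_count(void)
{
	/* anyone modifying the protected state needs exclusive mode */
	LWLockAcquire(MyCountersLock, LW_EXCLUSIVE);
	sharedCounters->writes++;
	LWLockRelease(MyCountersLock);
}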
Example 1: _bt_blwritepage
/*
* emit a completed btree page, and release the working storage.
*/
static void
_bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
{
// Fetch gp_persistent_relation_node information that will be added to XLOG record.
RelationFetchGpRelationNodeForXLog(wstate->index);
/* Ensure rd_smgr is open (could have been closed by relcache flush!) */
RelationOpenSmgr(wstate->index);
/* XLOG stuff */
if (wstate->btws_use_wal)
{
_bt_lognewpage(wstate->index, page, blkno);
}
else
{
/* Leave the page LSN zero if not WAL-logged, but set TLI anyway */
PageSetTLI(page, ThisTimeLineID);
}
/*
* If we have to write pages nonsequentially, fill in the space with
* zeroes until we come back and overwrite. This is not logically
* necessary on standard Unix filesystems (unwritten space will read as
* zeroes anyway), but it should help to avoid fragmentation. The dummy
* pages aren't WAL-logged though.
*/
while (blkno > wstate->btws_pages_written)
{
if (!wstate->btws_zeropage)
wstate->btws_zeropage = (Page) palloc0(BLCKSZ);
// -------- MirroredLock ----------
// UNDONE: Unfortunately, I think we write temp relations to the mirror...
LWLockAcquire(MirroredLock, LW_SHARED);
smgrextend(wstate->index->rd_smgr, wstate->btws_pages_written++,
(char *) wstate->btws_zeropage,
true);
LWLockRelease(MirroredLock);
// -------- MirroredLock ----------
}
// -------- MirroredLock ----------
// UNDONE: Unfortunately, I think we write temp relations to the mirror...
LWLockAcquire(MirroredLock, LW_SHARED);
/*
* Now write the page. We say isTemp = true even if it's not a temp
* index, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves before ending the build.
*/
if (blkno == wstate->btws_pages_written)
{
/* extending the file... */
smgrextend(wstate->index->rd_smgr, blkno, (char *) page, true);
wstate->btws_pages_written++;
}
else
{
/* overwriting a block we zero-filled before */
smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);
}
LWLockRelease(MirroredLock);
// -------- MirroredLock ----------
pfree(page);
}
Example 2: ReplicationSlotCreate
/*
* Create a new replication slot and mark it as used by this backend.
*
* name: Name of the slot
* db_specific: logical decoding is db specific; if the slot is going to
* be used for that pass true, otherwise false.
*/
void
ReplicationSlotCreate(const char *name, bool db_specific,
ReplicationSlotPersistency persistency)
{
ReplicationSlot *slot = NULL;
int i;
Assert(MyReplicationSlot == NULL);
ReplicationSlotValidateName(name, ERROR);
/*
* If some other backend ran this code concurrently with us, we'd likely both
* allocate the same slot, and that would be bad. We'd also be at risk of
* missing a name collision. Also, we don't want to try to create a new
* slot while somebody's busy cleaning up an old one, because we might
* both be monkeying with the same directory.
*/
LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
/*
* Check for name collision, and identify an allocatable slot. We need to
* hold ReplicationSlotControlLock in shared mode for this, so that nobody
* else can change the in_use flags while we're looking at them.
*/
LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
for (i = 0; i < max_replication_slots; i++)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("replication slot \"%s\" already exists", name)));
if (!s->in_use && slot == NULL)
slot = s;
}
LWLockRelease(ReplicationSlotControlLock);
/* If all slots are in use, we're out of luck. */
if (slot == NULL)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("all replication slots are in use"),
errhint("Free one or increase max_replication_slots.")));
/*
* Since this slot is not in use, nobody should be looking at any part of
* it other than the in_use field unless they're trying to allocate it.
* And since we hold ReplicationSlotAllocationLock, nobody except us can
* be doing that. So it's safe to initialize the slot.
*/
Assert(!slot->in_use);
Assert(slot->active_pid == 0);
slot->data.persistency = persistency;
slot->data.xmin = InvalidTransactionId;
slot->effective_xmin = InvalidTransactionId;
StrNCpy(NameStr(slot->data.name), name, NAMEDATALEN);
slot->data.database = db_specific ? MyDatabaseId : InvalidOid;
slot->data.restart_lsn = InvalidXLogRecPtr;
/*
* Create the slot on disk. We haven't actually marked the slot allocated
* yet, so no special cleanup is required if this errors out.
*/
CreateSlotOnDisk(slot);
/*
* We need to briefly prevent any other backend from iterating over the
* slots while we flip the in_use flag. We also need to set the active
* flag while holding the ControlLock as otherwise a concurrent
* SlotAcquire() could acquire the slot as well.
*/
LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
slot->in_use = true;
/* We can now mark the slot active, and that makes it our slot. */
{
volatile ReplicationSlot *vslot = slot;
SpinLockAcquire(&slot->mutex);
Assert(vslot->active_pid == 0);
vslot->active_pid = MyProcPid;
SpinLockRelease(&slot->mutex);
MyReplicationSlot = slot;
}
LWLockRelease(ReplicationSlotControlLock);
/*
* Now that the slot has been marked as in_use and active, it's safe to
* let somebody else try to allocate a slot.
//......... part of the code omitted here .........
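A hedged sketch of how a caller might use the function above: create a database-specific, persistent slot and give it up again when done. ReplicationSlotCreate and ReplicationSlotRelease are the real slot-API entry points; the wrapper function and the elided decoding work are hypothetical.
static void
use_my_slot(void)
{
	/* db_specific = true: bind the slot to the current database */
	ReplicationSlotCreate("my_slot", true, RS_PERSISTENT);

	/* ... the slot is now MyReplicationSlot; decode or stream from it ... */

	/* stop using the slot, but keep it on disk for later reuse */
	ReplicationSlotRelease();
}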
Example 3: SaveSlotToPath
/*
* Shared functionality between saving and creating a replication slot.
*/
static void
SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
{
char tmppath[MAXPGPATH];
char path[MAXPGPATH];
int fd;
ReplicationSlotOnDisk cp;
bool was_dirty;
/* first check whether there's something to write out */
{
volatile ReplicationSlot *vslot = slot;
SpinLockAcquire(&vslot->mutex);
was_dirty = vslot->dirty;
vslot->just_dirtied = false;
SpinLockRelease(&vslot->mutex);
}
/* and don't do anything if there's nothing to write */
if (!was_dirty)
return;
LWLockAcquire(slot->io_in_progress_lock, LW_EXCLUSIVE);
/* silence valgrind :( */
memset(&cp, 0, sizeof(ReplicationSlotOnDisk));
sprintf(tmppath, "%s/state.tmp", dir);
sprintf(path, "%s/state", dir);
fd = OpenTransientFile(tmppath,
O_CREAT | O_EXCL | O_WRONLY | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
{
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not create file \"%s\": %m",
tmppath)));
return;
}
cp.magic = SLOT_MAGIC;
INIT_CRC32C(cp.checksum);
cp.version = SLOT_VERSION;
cp.length = ReplicationSlotOnDiskV2Size;
SpinLockAcquire(&slot->mutex);
memcpy(&cp.slotdata, &slot->data, sizeof(ReplicationSlotPersistentData));
SpinLockRelease(&slot->mutex);
COMP_CRC32C(cp.checksum,
(char *) (&cp) + SnapBuildOnDiskNotChecksummedSize,
SnapBuildOnDiskChecksummedSize);
FIN_CRC32C(cp.checksum);
if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
{
int save_errno = errno;
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not write to file \"%s\": %m",
tmppath)));
return;
}
/* fsync the temporary file */
if (pg_fsync(fd) != 0)
{
int save_errno = errno;
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not fsync file \"%s\": %m",
tmppath)));
return;
}
CloseTransientFile(fd);
/* rename to permanent file, fsync file and directory */
if (rename(tmppath, path) != 0)
{
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not rename file \"%s\" to \"%s\": %m",
tmppath, path)));
return;
}
//......... part of the code omitted here .........
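The example is cut off right after the rename. In upstream PostgreSQL the rest of SaveSlotToPath makes the rename durable, clears the slot's dirty flag unless it was re-dirtied in the meantime, and finally releases the io_in_progress_lock taken near the top. A sketch of that tail, assuming upstream behavior (details may differ in this exact version):
	fsync_fname(path, false);			/* fsync the renamed state file */
	fsync_fname((char *) dir, true);	/* and the directory containing it */

	/* clear the dirty flag, unless somebody dirtied the slot again already */
	{
		volatile ReplicationSlot *vslot = slot;

		SpinLockAcquire(&vslot->mutex);
		if (!vslot->just_dirtied)
			vslot->dirty = false;
		SpinLockRelease(&vslot->mutex);
	}

	LWLockRelease(slot->io_in_progress_lock);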
Example 4: StrategyGetBuffer
/*
* StrategyGetBuffer
*
* Called by the bufmgr to get the next candidate buffer to use in
* BufferAlloc(). The only hard requirement BufferAlloc() has is that
* the selected buffer must not currently be pinned by anyone.
*
* strategy is a BufferAccessStrategy object, or NULL for default strategy.
*
* To ensure that no one else can pin the buffer before we do, we must
* return the buffer with the buffer header spinlock still held. If
* *lock_held is set on exit, we have returned with the BufFreelistLock
* still held, as well; the caller must release that lock once the spinlock
* is dropped. We do it that way because releasing the BufFreelistLock
* might awaken other processes, and it would be bad to do the associated
* kernel calls while holding the buffer header spinlock.
*/
volatile BufferDesc *
StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
{
volatile BufferDesc *buf;
int trycounter;
/*
* If given a strategy object, see whether it can select a buffer. We
* assume strategy objects don't need the BufFreelistLock.
*/
if (strategy != NULL)
{
buf = GetBufferFromRing(strategy);
if (buf != NULL)
{
*lock_held = false;
return buf;
}
}
/* Nope, so lock the freelist */
*lock_held = true;
LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
/*
* We count buffer allocation requests so that the bgwriter can estimate
* the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
/*
* Try to get a buffer from the freelist. Note that the freeNext fields
* are considered to be protected by the BufFreelistLock not the
* individual buffer spinlocks, so it's OK to manipulate them without
* holding the spinlock.
*/
while (StrategyControl->firstFreeBuffer >= 0)
{
buf = &BufferDescriptors[StrategyControl->firstFreeBuffer];
Assert(buf->freeNext != FREENEXT_NOT_IN_LIST);
/* Unconditionally remove buffer from freelist */
StrategyControl->firstFreeBuffer = buf->freeNext;
buf->freeNext = FREENEXT_NOT_IN_LIST;
/*
* If the buffer is pinned or has a nonzero usage_count, we cannot use
* it; discard it and retry. (This can only happen if VACUUM put a
* valid buffer in the freelist and then someone else used it before
* we got to it. It's probably impossible altogether as of 8.3, but
* we'd better check anyway.)
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
{
if (strategy != NULL)
AddBufferToRing(strategy, buf);
return buf;
}
UnlockBufHdr(buf);
}
/* Nothing on the freelist, so run the "clock sweep" algorithm */
trycounter = NBuffers;
for (;;)
{
buf = &BufferDescriptors[StrategyControl->nextVictimBuffer];
if (++StrategyControl->nextVictimBuffer >= NBuffers)
{
StrategyControl->nextVictimBuffer = 0;
StrategyControl->completePasses++;
}
/*
* If the buffer is pinned or has a nonzero usage_count, we cannot use
* it; decrement the usage_count (unless pinned) and keep scanning.
*/
LockBufHdr(buf);
if (buf->refcount == 0)
{
if (buf->usage_count > 0)
//......... part of the code omitted here .........
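A simplified, hypothetical sketch of the caller-side contract spelled out in the header comment: the buffer comes back with its header spinlock held, and whenever *lock_held is true the caller must also release BufFreelistLock itself, after the spinlock has been dropped. (The real BufferAlloc() pins the buffer before dropping the spinlock; that step is omitted here.)
	BufferAccessStrategy strategy = NULL;	/* NULL selects the default strategy */
	volatile BufferDesc *buf;
	bool		lock_held;

	buf = StrategyGetBuffer(strategy, &lock_held);

	/* ... examine/claim the victim while its header spinlock is held ... */
	UnlockBufHdr(buf);

	/* only after dropping the spinlock is it safe to release the freelist lock */
	if (lock_held)
		LWLockRelease(BufFreelistLock);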
Example 5: DtmXactCallback
static void
DtmXactCallback(XactEvent event, void *arg)
{
//XTM_INFO("%d: DtmXactCallback event=%d nextxid=%d\n", getpid(), event, DtmNextXid);
switch (event)
{
case XACT_EVENT_START:
//XTM_INFO("%d: normal=%d, initialized=%d, replication=%d, bgw=%d, vacuum=%d\n",
// getpid(), IsNormalProcessingMode(), dtm->initialized, MMDoReplication, IsBackgroundWorker, IsAutoVacuumWorkerProcess());
if (IsNormalProcessingMode() && dtm->initialized && MMDoReplication && !am_walsender && !IsBackgroundWorker && !IsAutoVacuumWorkerProcess()) {
MMBeginTransaction();
}
break;
#if 0
case XACT_EVENT_PRE_COMMIT:
case XACT_EVENT_PARALLEL_PRE_COMMIT:
{
TransactionId xid = GetCurrentTransactionIdIfAny();
if (!MMIsDistributedTrans && TransactionIdIsValid(xid)) {
XTM_INFO("%d: Will ignore transaction %u\n", getpid(), xid);
MMMarkTransAsLocal(xid);
}
break;
}
#endif
case XACT_EVENT_COMMIT:
case XACT_EVENT_ABORT:
if (TransactionIdIsValid(DtmNextXid))
{
if (!DtmVoted) {
ArbiterSetTransStatus(DtmNextXid, TRANSACTION_STATUS_ABORTED, false);
}
if (event == XACT_EVENT_COMMIT)
{
/*
* Now transaction status is already written in CLOG,
* so we can remove information about it from hash table
*/
LWLockAcquire(dtm->hashLock, LW_EXCLUSIVE);
hash_search(xid_in_doubt, &DtmNextXid, HASH_REMOVE, NULL);
LWLockRelease(dtm->hashLock);
}
#if 0 /* should be handled now using DtmVoted flag */
else
{
/*
* Transaction at the node can be aborted because of transaction failure at some other node
* before it starts doing anything and assigned Xid, in this case Postgres is not calling SetTransactionStatus,
* so we have to send report to DTMD here
*/
if (!TransactionIdIsValid(GetCurrentTransactionIdIfAny())) {
XTM_INFO("%d: abort transation on DTMD\n", getpid());
ArbiterSetTransStatus(DtmNextXid, TRANSACTION_STATUS_ABORTED, false);
}
}
#endif
DtmNextXid = InvalidTransactionId;
DtmLastSnapshot = NULL;
}
MMIsDistributedTrans = false;
break;
default:
break;
}
}
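The commit path above removes the entry from xid_in_doubt while holding dtm->hashLock exclusively. A read-only lookup in the same hash table only needs the lock in shared mode, because hash_search() with HASH_FIND does not modify the table. The helper below is a hypothetical sketch, not part of the source:
static bool
DtmTransactionIsInDoubt(TransactionId xid)
{
	bool		found;

	LWLockAcquire(dtm->hashLock, LW_SHARED);
	(void) hash_search(xid_in_doubt, &xid, HASH_FIND, &found);
	LWLockRelease(dtm->hashLock);

	return found;
}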
Example 6: ShmemInitStruct
/*
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
* a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
*
* Returns: pointer to the object. *foundPtr is set TRUE if the object was
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
* cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
ShmemInitStruct(const char *name, Size size, bool *foundPtr)
{
ShmemIndexEnt *result;
void *structPtr;
LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
if (!ShmemIndex)
{
PGShmemHeader *shmemseghdr = ShmemSegHdr;
/* Must be trying to create/attach to ShmemIndex itself */
Assert(strcmp(name, "ShmemIndex") == 0);
if (IsUnderPostmaster)
{
/* Must be initializing a (non-standalone) backend */
Assert(shmemseghdr->index != NULL);
structPtr = shmemseghdr->index;
*foundPtr = TRUE;
}
else
{
/*
* If the shmem index doesn't exist, we are bootstrapping: we must
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
* index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
structPtr = ShmemAlloc(size);
if (structPtr == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough shared memory for data structure"
" \"%s\" (%zu bytes requested)",
name, size)));
shmemseghdr->index = structPtr;
*foundPtr = FALSE;
}
LWLockRelease(ShmemIndexLock);
return structPtr;
}
/* look it up in the shmem index */
result = (ShmemIndexEnt *)
hash_search(ShmemIndex, name, HASH_ENTER_NULL, foundPtr);
if (!result)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
}
if (*foundPtr)
{
/*
* Structure is in the shmem index so someone else has allocated it
* already. The size better be the same as the size we are trying to
* initialize to, or there is a name conflict (or worse).
*/
if (result->size != size)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %zu, actual %zu",
name, size, result->size)));
}
structPtr = result->location;
}
else
{
/* It isn't in the table yet. allocate and initialize it */
structPtr = ShmemAlloc(size);
if (structPtr == NULL)
{
/* out of memory; remove the failed ShmemIndex entry */
//......... part of the code omitted here .........
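A hypothetical sketch of the usual calling pattern, as a module's shared-memory initialization routine would do it. The structure, its name string, and the function are invented; the found-flag handling is the standard idiom the header comment describes.
typedef struct MyModuleState
{
	int			counter;
} MyModuleState;

static MyModuleState *myState;

void
MyModuleShmemInit(void)
{
	bool		found;

	myState = (MyModuleState *)
		ShmemInitStruct("My Module State", sizeof(MyModuleState), &found);

	if (!found)
	{
		/* first process here: we got fresh space and must initialize it */
		memset(myState, 0, sizeof(MyModuleState));
	}
	/* otherwise somebody else already initialized it; just use the pointer */
}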
Example 7: CheckDeadLock
/*
* CheckDeadLock
*
* We only get to this routine if the DEADLOCK_TIMEOUT fired
* while waiting for a lock to be released by some other process. Look
* to see if there's a deadlock; if not, just return and continue waiting.
* (But signal ProcSleep to log a message, if log_lock_waits is true.)
* If we have a real deadlock, remove ourselves from the lock's wait queue
* and signal an error to ProcSleep.
*
* NB: this is run inside a signal handler, so be very wary about what is done
* here or in called routines.
*/
void
CheckDeadLock(void)
{
int i;
/*
* Acquire exclusive lock on the entire shared lock data structures. Must
* grab LWLocks in partition-number order to avoid LWLock deadlock.
*
* Note that the deadlock check interrupt had better not be enabled
* anywhere that this process itself holds lock partition locks, else this
* will wait forever. Also note that LWLockAcquire creates a critical
* section, so that this routine cannot be interrupted by cancel/die
* interrupts.
*/
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE);
/*
* Check to see if we've been awoken by anyone in the interim.
*
* If we have, we can return and resume our transaction -- happy day.
* Before we are awoken the process releasing the lock grants it to us so
* we know that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
* This is quicker than checking our semaphore's state, since no kernel
* call is needed, and it is safe because we hold the lock partition lock.
*/
if (MyProc->links.prev == NULL ||
MyProc->links.next == NULL)
goto check_done;
#ifdef LOCK_DEBUG
if (Debug_deadlocks)
DumpAllLocks();
#endif
/* Run the deadlock check, and set deadlock_state for use by ProcSleep */
deadlock_state = DeadLockCheck(MyProc);
if (deadlock_state == DS_HARD_DEADLOCK)
{
/*
* Oops. We have a deadlock.
*
* Get this process out of wait state. (Note: we could do this more
* efficiently by relying on lockAwaited, but use this coding to
* preserve the flexibility to kill some other transaction than the
* one detecting the deadlock.)
*
* RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
* ProcSleep will report an error after we return from the signal
* handler.
*/
Assert(MyProc->waitLock != NULL);
RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
/*
* Unlock my semaphore so that the interrupted ProcSleep() call can
* finish.
*/
PGSemaphoreUnlock(&MyProc->sem);
/*
* We're done here. Transaction abort caused by the error that
* ProcSleep will raise will cause any other locks we hold to be
* released, thus allowing other processes to wake up; we don't need
* to do that here. NOTE: an exception is that releasing locks we
* hold doesn't consider the possibility of waiters that were blocked
* behind us on the lock we just failed to get, and might now be
* wakable because we're not in front of them anymore. However,
* RemoveFromWaitQueue took care of waking up any such processes.
*/
}
else if (log_lock_waits || deadlock_state == DS_BLOCKED_BY_AUTOVACUUM)
{
/*
* Unlock my semaphore so that the interrupted ProcSleep() call can
* print the log message (we daren't do it here because we are inside
* a signal handler). It will then sleep again until someone releases
* the lock.
*
* If blocked by autovacuum, this wakeup will enable ProcSleep to send
* the canceling signal to the autovacuum worker.
*/
PGSemaphoreUnlock(&MyProc->sem);
//......... part of the code omitted here .........
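The example is truncated before the common exit. In PostgreSQL's CheckDeadLock the routine ends by releasing every lock-partition LWLock it acquired at the top, in the reverse of the acquisition order; a sketch of that exit, assuming this version follows upstream:
check_done:
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(FirstLockMgrLock + i);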
Example 8: SimpleLruWritePage
/*
* Write a page from a shared buffer, if necessary.
* Does nothing if the specified slot is not dirty.
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
* the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
*/
void
SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
{
SlruShared shared = ctl->shared;
int pageno = shared->page_number[slotno];
bool ok;
/* If a write is in progress, wait for it to finish */
while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
shared->page_number[slotno] == pageno)
{
SimpleLruWaitIO(ctl, slotno);
}
/*
* Do nothing if page is not dirty, or if buffer no longer contains the
* same page we were called for.
*/
if (!shared->page_dirty[slotno] ||
shared->page_status[slotno] != SLRU_PAGE_VALID ||
shared->page_number[slotno] != pageno)
return;
/*
* Mark the slot write-busy, and clear the dirtybit. After this point, a
* transaction status update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
/* Do the write */
ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
/* If we failed, and we're in a flush, better close the files */
if (!ok && fdata)
{
int i;
for (i = 0; i < fdata->num_files; i++)
MirroredFlatFile_Close(&fdata->mirroredOpens[i]);
}
/* Re-acquire control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
/* If we failed to write, mark the page dirty again */
if (!ok)
shared->page_dirty[slotno] = true;
shared->page_status[slotno] = SLRU_PAGE_VALID;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok)
SlruReportIOError(ctl, pageno, InvalidTransactionId);
}
Example 9: RemoveTableSpace
//......... part of the code omitted here .........
* is logically the same as checkSharedDependencies, however we don't
* actually track these in pg_shdepend, instead we lookup this information
* in the gp_persistent_database/relation_node tables.
*/
/* ... */
/*
* Remove the pg_tablespace tuple (this will roll back if we fail below)
*/
caql_delete_current(pcqCtx);
/*
* Remove any comments on this tablespace.
*/
DeleteSharedComments(tablespaceoid, TableSpaceRelationId);
/*
* Remove dependency on owner.
*
* If shared dependencies are added between filespace <=> tablespace
* they will be deleted as well.
*/
deleteSharedDependencyRecordsFor(TableSpaceRelationId, tablespaceoid);
/* MPP-6929: metadata tracking */
if (Gp_role == GP_ROLE_DISPATCH)
MetaTrackDropObject(TableSpaceRelationId,
tablespaceoid);
/*
* Acquire TablespaceCreateLock to ensure that no
* MirroredFileSysObj_JustInTimeDbDirCreate is running concurrently.
*/
LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);
/*
* Check for any relations still defined in the tablespace.
*/
PersistentRelation_CheckTablespace(tablespaceoid, &count, &relfilenode);
if (count > 0)
{
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("tablespace \"%s\" is not empty", tablespacename)));
}
/*
* Schedule the removal the physical infrastructure.
*
* Note: This only schedules the delete, the delete won't actually occur
* until after the transaction has committed. This should however do
* everything it can to assure that the delete will occur successfully,
* e.g. check permissions etc.
*/
/*
* Schedule all persistent database directory removals for transaction commit.
*/
PersistentDatabase_DirIterateInit();
while (PersistentDatabase_DirIterateNext(
&dbDirNode,
&persistentState,
&persistentTid,
&persistentSerialNum))
{
if (dbDirNode.tablespace != tablespaceoid)
Example 10: SimpleLruTruncate_internal
/*
* Remove all segments before the one holding the passed page number
*/
static void
SimpleLruTruncate_internal(SlruCtl ctl, int cutoffPage, bool lockHeld)
{
SlruShared shared = ctl->shared;
int slotno;
/*
* The cutoff point is the start of the segment containing cutoffPage.
*/
cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
/*
* Scan shared memory and remove any pages preceding the cutoff page, to
* ensure we won't rewrite them later. (Since this is normally called in
* or just after a checkpoint, any dirty pages should have been flushed
* already ... we're just being extra careful here.)
*/
if (!lockHeld)
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
restart:;
/*
* While we are holding the lock, make an important safety check: the
* planned cutoff point must be <= the current endpoint page. Otherwise we
* have already wrapped around, and proceeding with the truncation would
* risk removing the current segment.
*/
if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
{
if (!lockHeld)
LWLockRelease(shared->ControlLock);
ereport(LOG,
(errmsg("could not truncate directory \"%s\": apparent wraparound",
ctl->Dir)));
return;
}
for (slotno = 0; slotno < shared->num_slots; slotno++)
{
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
continue;
if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
continue;
/*
* If page is clean, just change state to EMPTY (expected case).
*/
if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
!shared->page_dirty[slotno])
{
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
continue;
}
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
* the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
if (shared->page_status[slotno] == SLRU_PAGE_VALID)
SimpleLruWritePage(ctl, slotno, NULL);
else
SimpleLruWaitIO(ctl, slotno);
goto restart;
}
if (!lockHeld)
LWLockRelease(shared->ControlLock);
/* Now we can remove the old segment(s) */
(void) SlruScanDirectory(ctl, cutoffPage, true);
}
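For context, a sketch of how an SLRU consumer typically reaches this code: in older upstream PostgreSQL, TruncateCLOG() converts the oldest transaction ID that must be kept into a page number and hands it to the public SimpleLruTruncate() wrapper (which in this code base presumably calls the _internal variant above with lockHeld = false). Simplified; exact signatures vary by version.
void
TruncateCLOG(TransactionId oldestXact)
{
	int			cutoffPage;

	/* one CLOG page covers CLOG_XACTS_PER_PAGE transactions */
	cutoffPage = TransactionIdToPage(oldestXact);

	SimpleLruTruncate(ClogCtl, cutoffPage);
}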
Example 11: SimpleLruReadPage_Internal
/*
* Find a page in a shared buffer, reading it in if necessary.
* The page number must correspond to an already-initialized page.
*
* The passed-in xid is used only for error reporting, and may be
* InvalidTransactionId if no specific xid is associated with the action.
*
* If the passed in pointer to valid is NULL, then log errors can be
* generated by this function. If valid is not NULL, then the function
* will not generate log errors, but will set the boolean value
* pointed to by valid to TRUE if it was able to read the page,
* or FALSE if the page read had error.
*
* Return value is the shared-buffer slot number now holding the page.
* The buffer's LRU access info is updated.
*
* Control lock must be held at entry, and will be held at exit.
*/
static int
SimpleLruReadPage_Internal(SlruCtl ctl, int pageno, TransactionId xid, bool *valid)
{
SlruShared shared = ctl->shared;
/* Outer loop handles restart if we must wait for someone else's I/O */
for (;;)
{
int slotno;
bool ok;
/* See if page already is in memory; if not, pick victim slot */
slotno = SlruSelectLRUPage(ctl, pageno);
/* Did we find the page in memory? */
if (shared->page_number[slotno] == pageno &&
shared->page_status[slotno] != SLRU_PAGE_EMPTY)
{
/* If page is still being read in, we must wait for I/O */
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
{
SimpleLruWaitIO(ctl, slotno);
/* Now we must recheck state from the top */
continue;
}
/* Otherwise, it's ready to use */
SlruRecentlyUsed(shared, slotno);
if (valid != NULL)
*valid = true;
return slotno;
}
/* We found no match; assert we selected a freeable slot */
Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
(shared->page_status[slotno] == SLRU_PAGE_VALID &&
!shared->page_dirty[slotno]));
/* Mark the slot read-busy */
shared->page_number[slotno] = pageno;
shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
/*
* Temporarily mark page as recently-used to discourage
* SlruSelectLRUPage from selecting it again for someone else.
*/
SlruRecentlyUsed(shared, slotno);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
/* Do the read */
ok = SlruPhysicalReadPage(ctl, pageno, slotno);
/* Re-acquire control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
!shared->page_dirty[slotno]);
shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok && valid == NULL)
SlruReportIOError(ctl, pageno, xid);
else if (valid != NULL)
{
if (!ok)
{
LWLockRelease(shared->ControlLock);
*valid = false;
return -1;
}
else
*valid = true;
}
//......... part of the code omitted here .........
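A hypothetical caller sketch of the two modes the header comment describes: pass NULL for valid and any read failure is reported through ereport(), or pass a bool pointer to suppress the error and inspect the result instead. Here ctl, pageno and xid stand for whatever SLRU, page and transaction the caller is working with.
	bool		valid;
	int			slotno;

	/* quiet mode: returns -1 and sets valid = false on a failed read */
	slotno = SimpleLruReadPage_Internal(ctl, pageno, xid, &valid);
	if (slotno < 0 || !valid)
	{
		/* handle the unreadable page ourselves; no error was logged */
	}

	/* reporting mode: a failed read is ereport()ed for us */
	slotno = SimpleLruReadPage_Internal(ctl, pageno, xid, NULL);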
Example 12: SlruSelectLRUPage
/*
* Select the slot to re-use when we need a free slot.
*
* The target page number is passed because we need to consider the
* possibility that some other process reads in the target page while
* we are doing I/O to free a slot. Hence, check or recheck to see if
* any slot already holds the target page, and return that slot if so.
* Thus, the returned slot is *either* a slot already holding the pageno
* (could be any state except EMPTY), *or* a freeable slot (state EMPTY
* or CLEAN).
*
* Control lock must be held at entry, and will be held at exit.
*/
static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = ctl->shared;
/* Outer loop handles restart after I/O */
for (;;)
{
int slotno;
int bestslot = 0;
unsigned int bestcount = 0;
/* See if page already has a buffer assigned */
for (slotno = 0; slotno < NUM_SLRU_BUFFERS; slotno++)
{
if (shared->page_number[slotno] == pageno &&
shared->page_status[slotno] != SLRU_PAGE_EMPTY)
return slotno;
}
/*
* If we find any EMPTY slot, just select that one. Else locate the
* least-recently-used slot that isn't the latest page.
*/
for (slotno = 0; slotno < NUM_SLRU_BUFFERS; slotno++)
{
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
}
}
/*
* If the selected page is clean, we're set.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_CLEAN)
return bestslot;
/*
* We need to do I/O. Normal case is that we have to write it out,
* but it's possible in the worst case to have selected a read-busy
* page. In that case we just wait for someone else to complete the
* I/O, which we can do by waiting for the per-buffer lock.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
{
LWLockRelease(shared->ControlLock);
LWLockAcquire(shared->buffer_locks[bestslot], LW_SHARED);
LWLockRelease(shared->buffer_locks[bestslot]);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
}
else
SimpleLruWritePage(ctl, bestslot, NULL);
/*
* Now loop back and try again. This is the easiest way of dealing
* with corner cases such as the victim page being re-dirtied while we
* wrote it.
*/
}
}
Example 13: SimpleLruWritePage
/*
* Write a page from a shared buffer, if necessary.
* Does nothing if the specified slot is not dirty.
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
* the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
*/
void
SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
{
SlruShared shared = ctl->shared;
int pageno;
bool ok;
/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
shared->page_status[slotno] != SLRU_PAGE_WRITE_IN_PROGRESS)
return;
pageno = shared->page_number[slotno];
/*
* We must grab the per-buffer lock to do I/O. To avoid deadlock, must
* release ControlLock while waiting for per-buffer lock. Fortunately,
* most of the time the per-buffer lock shouldn't be already held, so we
* can do this:
*/
if (!LWLockConditionalAcquire(shared->buffer_locks[slotno],
LW_EXCLUSIVE))
{
LWLockRelease(shared->ControlLock);
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
}
/*
* Check to see if someone else already did the write, or took the buffer
* away from us. If so, do nothing. NOTE: we really should never see
* WRITE_IN_PROGRESS here, since that state should only occur while the
* writer is holding the buffer lock. But accept it so that we have a
* recovery path if a writer aborts.
*/
if (shared->page_number[slotno] != pageno ||
(shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
shared->page_status[slotno] != SLRU_PAGE_WRITE_IN_PROGRESS))
{
LWLockRelease(shared->buffer_locks[slotno]);
return;
}
/*
* Mark the slot write-busy. After this point, a transaction status
* update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
/* Okay, release the control lock and do the write */
LWLockRelease(shared->ControlLock);
ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
/* If we failed, and we're in a flush, better close the files */
if (!ok && fdata)
{
int i;
for (i = 0; i < fdata->num_files; i++)
close(fdata->fd[i]);
}
/* Re-acquire shared control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
(shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS ||
shared->page_status[slotno] == SLRU_PAGE_DIRTY));
/* Cannot set CLEAN if someone re-dirtied page since write started */
if (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
shared->page_status[slotno] = ok ? SLRU_PAGE_CLEAN : SLRU_PAGE_DIRTY;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok)
SlruReportIOError(ctl, pageno, InvalidTransactionId);
}
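The LWLockConditionalAcquire() used above is also handy on its own whenever a backend would rather skip some optional work than sleep on a contended lock. A small hypothetical sketch (the lock name and the work function are made up):
	if (LWLockConditionalAcquire(MyStatsLock, LW_EXCLUSIVE))
	{
		/* got the lock without sleeping: do the optional work */
		update_shared_stats();
		LWLockRelease(MyStatsLock);
	}
	else
	{
		/* somebody else holds it; skip this round rather than block */
	}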
Example 14: SimpleLruReadPage
/*
* Find a page in a shared buffer, reading it in if necessary.
* The page number must correspond to an already-initialized page.
*
* The passed-in xid is used only for error reporting, and may be
* InvalidTransactionId if no specific xid is associated with the action.
*
* Return value is the shared-buffer slot number now holding the page.
* The buffer's LRU access info is updated.
*
* Control lock must be held at entry, and will be held at exit.
*/
int
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid)
{
SlruShared shared = ctl->shared;
/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
{
int slotno;
bool ok;
/* See if page already is in memory; if not, pick victim slot */
slotno = SlruSelectLRUPage(ctl, pageno);
/* Did we find the page in memory? */
if (shared->page_number[slotno] == pageno &&
shared->page_status[slotno] != SLRU_PAGE_EMPTY)
{
/* If page is still being read in, we cannot use it yet */
if (shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
{
/* otherwise, it's ready to use */
SlruRecentlyUsed(shared, slotno);
return slotno;
}
}
else
{
/* We found no match; assert we selected a freeable slot */
Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
shared->page_status[slotno] == SLRU_PAGE_CLEAN);
}
/* Mark the slot read-busy (no-op if it already was) */
shared->page_number[slotno] = pageno;
shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
/*
* Temporarily mark page as recently-used to discourage
* SlruSelectLRUPage from selecting it again for someone else.
*/
SlruRecentlyUsed(shared, slotno);
/*
* We must grab the per-buffer lock to do I/O. To avoid deadlock,
* must release ControlLock while waiting for per-buffer lock.
* Fortunately, most of the time the per-buffer lock shouldn't be
* already held, so we can do this:
*/
if (!LWLockConditionalAcquire(shared->buffer_locks[slotno],
LW_EXCLUSIVE))
{
LWLockRelease(shared->ControlLock);
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
}
/*
* Check to see if someone else already did the read, or took the
* buffer away from us. If so, restart from the top.
*/
if (shared->page_number[slotno] != pageno ||
shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
{
LWLockRelease(shared->buffer_locks[slotno]);
continue;
}
/* Okay, release control lock and do the read */
LWLockRelease(shared->ControlLock);
ok = SlruPhysicalReadPage(ctl, pageno, slotno);
/* Re-acquire shared control lock and update page state */
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
Assert(shared->page_number[slotno] == pageno &&
shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS);
shared->page_status[slotno] = ok ? SLRU_PAGE_CLEAN : SLRU_PAGE_EMPTY;
LWLockRelease(shared->buffer_locks[slotno]);
/* Now it's okay to ereport if we failed */
if (!ok)
SlruReportIOError(ctl, pageno, xid);
SlruRecentlyUsed(shared, slotno);
//......... part of the code omitted here .........
Example 15: TablespaceCreateDbspace
/*
* Each database using a table space is isolated into its own name space
* by a subdirectory named for the database OID. On first creation of an
* object in the tablespace, create the subdirectory. If the subdirectory
* already exists, fall through quietly.
*
* isRedo indicates that we are creating an object during WAL replay.
* In this case we will cope with the possibility of the tablespace
* directory not being there either --- this could happen if we are
* replaying an operation on a table in a subsequently-dropped tablespace.
* We handle this by making a directory in the place where the tablespace
* symlink would normally be. This isn't an exact replay of course, but
* it's the best we can do given the available information.
*
* If tablespaces are not supported, we still need it in case we have to
* re-create a database subdirectory (of $PGDATA/base) during WAL replay.
*/
void
TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
{
struct stat st;
char *dir;
/*
* The global tablespace doesn't have per-database subdirectories, so
* nothing to do for it.
*/
if (spcNode == GLOBALTABLESPACE_OID)
return;
Assert(OidIsValid(spcNode));
Assert(OidIsValid(dbNode));
dir = GetDatabasePath(dbNode, spcNode);
if (stat(dir, &st) < 0)
{
/* Directory does not exist? */
if (errno == ENOENT)
{
/*
* Acquire TablespaceCreateLock to ensure that no DROP TABLESPACE
* or TablespaceCreateDbspace is running concurrently.
*/
LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);
/*
* Recheck to see if someone created the directory while we were
* waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
/* Directory was created */
}
else
{
/* Directory creation failed? */
if (mkdir(dir, S_IRWXU) < 0)
{
char *parentdir;
/* Failure other than not exists or not in WAL replay? */
if (errno != ENOENT || !isRedo)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
dir)));
/*
* Parent directories are missing during WAL replay, so
* continue by creating simple parent directories rather
* than a symlink.
*/
/* create two parents up if not exist */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
get_parent_directory(parentdir);
/* Can't create parent and it doesn't already exist? */
if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
parentdir)));
pfree(parentdir);
/* create one parent up if not exist */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
/* Can't create parent and it doesn't already exist? */
if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create directory \"%s\": %m",
parentdir)));
pfree(parentdir);
/* Create database directory */
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
//......... part of the code omitted here .........
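The example is truncated, but its important shape is already visible: check for the directory without the lock, take TablespaceCreateLock only on the slow path, recheck under the lock, and only then create the directory. A stripped-down hypothetical sketch of that check-lock-recheck idiom (dir is the per-database path computed above; error handling is reduced to a single elog):
	struct stat st;

	if (stat(dir, &st) < 0 && errno == ENOENT)
	{
		LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);

		/* somebody may have created it while we waited for the lock */
		if (stat(dir, &st) < 0 && errno == ENOENT)
		{
			if (mkdir(dir, S_IRWXU) < 0)
				elog(ERROR, "could not create directory \"%s\": %m", dir);
		}

		LWLockRelease(TablespaceCreateLock);
	}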