This article collects typical usage examples of the SpinLockAcquire function in C. If you have been wondering what SpinLockAcquire does, how to call it, or what real-world uses look like, the curated code examples below may help.
15 code examples of SpinLockAcquire are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C code examples.
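All 15 examples share the same shape: acquire the lock, do a few instructions of work, release the lock. As a primer, here is a minimal, self-contained sketch of that contract. It is not PostgreSQL's actual implementation (that lives in src/include/storage/spin.h and s_lock.c, with platform-specific test-and-set primitives); the C11 atomic_flag version below, with made-up demo_* names, only illustrates the acquire/release semantics the examples rely on.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for slock_t: a single atomic flag. */
typedef atomic_flag demo_slock_t;

static void
demo_SpinLockAcquire(demo_slock_t *lock)
{
	/*
	 * Busy-wait until the flag was previously clear; the acquire fence
	 * orders the critical section's reads after the lock is taken.
	 */
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		;						/* spin */
}

static void
demo_SpinLockRelease(demo_slock_t *lock)
{
	/* The release fence publishes the critical section's writes. */
	atomic_flag_clear_explicit(lock, memory_order_release);
}

int
main(void)
{
	demo_slock_t mutex = ATOMIC_FLAG_INIT;
	int			counter = 0;

	demo_SpinLockAcquire(&mutex);
	counter++;					/* keep critical sections to a few instructions */
	demo_SpinLockRelease(&mutex);

	printf("counter = %d\n", counter);
	return 0;
}

Note how short each protected region is in the examples that follow: spinlocks do not yield the CPU, so holding one across anything slow (I/O, memory allocation, elog) is a bug.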
Example 1: RequestXLogStreaming
/*
 * Request postmaster to start walreceiver.
 *
 * recptr indicates the position where streaming should begin, and conninfo
 * is a libpq connection string to use.
 */
void
RequestXLogStreaming(XLogRecPtr recptr, const char *conninfo)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile WalRcvData *walrcv = WalRcv;
	pg_time_t	now = (pg_time_t) time(NULL);

	/*
	 * We always start at the beginning of the segment. That prevents a broken
	 * segment (i.e., with no records in the first half of a segment) from
	 * being created by XLOG streaming, which might cause trouble later on if
	 * the segment is e.g. archived.
	 */
	if (recptr.xrecoff % XLogSegSize != 0)
		recptr.xrecoff -= recptr.xrecoff % XLogSegSize;

	SpinLockAcquire(&walrcv->mutex);

	/* It better be stopped before we try to restart it */
	Assert(walrcv->walRcvState == WALRCV_STOPPED);

	if (conninfo != NULL)
		strlcpy((char *) walrcv->conninfo, conninfo, MAXCONNINFO);
	else
		walrcv->conninfo[0] = '\0';
	walrcv->walRcvState = WALRCV_STARTING;
	walrcv->startTime = now;

	/*
	 * If this is the first startup of walreceiver, we initialize receivedUpto
	 * and latestChunkStart to receiveStart.
	 */
	if (walrcv->receiveStart.xlogid == 0 &&
		walrcv->receiveStart.xrecoff == 0)
	{
		walrcv->receivedUpto = recptr;
		walrcv->latestChunkStart = recptr;
	}
	walrcv->receiveStart = recptr;
	SpinLockRelease(&walrcv->mutex);

	SendPostmasterSignal(PMSIGNAL_START_WALRECEIVER);
}
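The rounding at the top of the function is plain modulo arithmetic: any offset inside a WAL segment is snapped back to the segment's start. A tiny stand-alone check of that step (the 16 MB figure is an assumption; XLogSegSize was a build-time constant in the PostgreSQL versions this example comes from):

#include <stdint.h>
#include <stdio.h>

#define DEMO_XLOG_SEG_SIZE (16u * 1024 * 1024)	/* assumed 16 MB segment */

int
main(void)
{
	uint32_t	xrecoff = (24u * 1024 * 1024) + 123;	/* somewhere inside segment 1 */

	/* Same rounding as RequestXLogStreaming: back to the segment start. */
	if (xrecoff % DEMO_XLOG_SEG_SIZE != 0)
		xrecoff -= xrecoff % DEMO_XLOG_SEG_SIZE;

	printf("rounded offset: %u\n", xrecoff);	/* 16777216, i.e. 16 MB */
	return 0;
}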
Example 2: ReplicationSlotAcquire
/*
 * Find a previously created slot and mark it as used by this backend.
 */
void
ReplicationSlotAcquire(const char *name)
{
	ReplicationSlot *slot = NULL;
	int			i;
	int			active_pid = 0;

	Assert(MyReplicationSlot == NULL);

	ReplicationSlotValidateName(name, ERROR);

	/* Search for the named slot and mark it active if we find it. */
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

		if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
		{
			SpinLockAcquire(&s->mutex);
			active_pid = s->active_pid;
			if (active_pid == 0)
				s->active_pid = MyProcPid;
			SpinLockRelease(&s->mutex);
			slot = s;
			break;
		}
	}
	LWLockRelease(ReplicationSlotControlLock);

	/* If we did not find the slot or it was already active, error out. */
	if (slot == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("replication slot \"%s\" does not exist", name)));
	if (active_pid != 0)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_IN_USE),
				 errmsg("replication slot \"%s\" is already active for PID %d",
						name, active_pid)));

	/* We made this slot active, so it's ours now. */
	MyReplicationSlot = slot;
}
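The interesting detail is that active_pid is sampled and, if zero, overwritten within a single spinlock hold, so exactly one backend can win the slot; the error is then reported only after the lock is released. A hedged sketch of that claim-if-unowned step, expressed as one C11 compare-and-swap in place of the spinlock (try_claim and its names are illustrative, not PostgreSQL API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int slot_active_pid = 0;	/* 0 means "unowned", as in the slot code */

/* Returns 0 if we claimed the slot, else the PID that already owns it. */
static int
try_claim(int my_pid)
{
	int			expected = 0;

	if (atomic_compare_exchange_strong(&slot_active_pid, &expected, my_pid))
		return 0;				/* we are now the owner */
	return expected;			/* somebody else got there first */
}

int
main(void)
{
	printf("owner after first claim: %d\n", try_claim(1234));	/* 0: claimed */
	printf("owner seen by second claim: %d\n", try_claim(5678)); /* 1234: busy */
	return 0;
}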
Example 3: ReplicationSlotsComputeRequiredXmin
/*
 * Compute the oldest xmin across all slots and store it in the ProcArray.
 */
void
ReplicationSlotsComputeRequiredXmin(bool already_locked)
{
	int			i;
	TransactionId agg_xmin = InvalidTransactionId;
	TransactionId agg_catalog_xmin = InvalidTransactionId;

	Assert(ReplicationSlotCtl != NULL);

	if (!already_locked)
		LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
		TransactionId effective_xmin;
		TransactionId effective_catalog_xmin;

		if (!s->in_use)
			continue;

		SpinLockAcquire(&s->mutex);
		effective_xmin = s->effective_xmin;
		effective_catalog_xmin = s->effective_catalog_xmin;
		SpinLockRelease(&s->mutex);

		/* check the data xmin */
		if (TransactionIdIsValid(effective_xmin) &&
			(!TransactionIdIsValid(agg_xmin) ||
			 TransactionIdPrecedes(effective_xmin, agg_xmin)))
			agg_xmin = effective_xmin;

		/* check the catalog xmin */
		if (TransactionIdIsValid(effective_catalog_xmin) &&
			(!TransactionIdIsValid(agg_catalog_xmin) ||
			 TransactionIdPrecedes(effective_catalog_xmin, agg_catalog_xmin)))
			agg_catalog_xmin = effective_catalog_xmin;
	}

	if (!already_locked)
		LWLockRelease(ReplicationSlotControlLock);

	ProcArraySetReplicationSlotXmin(agg_xmin, agg_catalog_xmin, already_locked);
}
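Each slot contributes an optional xmin, and the loop keeps the oldest valid one; InvalidTransactionId doubles as "no value yet". A self-contained sketch of that fold, with plain < standing in for TransactionIdPrecedes (which really compares transaction IDs modulo 2^31) and made-up demo names:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INVALID_XID 0		/* stand-in for InvalidTransactionId */

/* Fold one optional value into the running minimum, as the loop above does. */
static uint32_t
fold_oldest(uint32_t agg, uint32_t value)
{
	if (value != DEMO_INVALID_XID &&
		(agg == DEMO_INVALID_XID || value < agg))
		return value;
	return agg;
}

int
main(void)
{
	uint32_t	xmins[] = {DEMO_INVALID_XID, 812, 640, DEMO_INVALID_XID, 977};
	uint32_t	agg = DEMO_INVALID_XID;

	for (int i = 0; i < 5; i++)
		agg = fold_oldest(agg, xmins[i]);

	printf("oldest xmin: %u\n", agg);	/* 640 */
	return 0;
}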
Example 4: DecrementMcGroupUsageCount
static FSTATUS
DecrementMcGroupUsageCount(CLIENT_HANDLE SdClientHandle, MC_GROUP_ID *pMcGroupId)
{
	FSTATUS		Status = FSUCCESS;
	MC_CLIENT  *pMcClient;
	MC_GROUP   *pMcGroup;

	_DBG_ENTER_LVL(_DBG_LVL_FUNC_TRACE, DecrementMcGroupUsageCount);

	SpinLockAcquire(&MulticastLock);

	pMcClient = FindMcClient(SdClientHandle, pMcGroupId);
	if (pMcClient == NULL)
	{
		Status = FNOT_FOUND;
		SpinLockRelease(&MulticastLock);
		_DBG_ERROR(("DecrementMcGroupUsageCount Client Not Found.\n"));
		goto exit;
	}

	pMcGroup = pMcClient->pMcGroup;
	pMcClient->pMcGroup = NULL;
	pMcClient->McClientDelete = TRUE;
	QListRemoveItem(&pMcGroup->McClientList, &pMcClient->McGroupListItem);

	if (QListIsEmpty(&pMcGroup->McClientList))
	{
		/* schedule the group removal in the Maintenance thread */
		pMcGroup->McGroupDelete = TRUE;
		if (pMcGroup->McGroupState == MC_GROUP_STATE_AVAILABLE)
		{
			QueueSubnetDriverCall(pMcGroup, MC_GROUP_STATE_REQUEST_LEAVE);
		}
	}
	SpinLockRelease(&MulticastLock);

exit:
	_DBG_LEAVE_LVL(_DBG_LVL_FUNC_TRACE);
	return Status;
}
Example 5: DisownLatch
/*
 * Disown a shared latch, returning its event handle to the shared pool.
 */
void
DisownLatch(volatile Latch *latch)
{
	Assert(latch->is_shared);
	Assert(latch->event != NULL);

	/* Put the event handle back to the pool */
	SpinLockAcquire(&sharedHandles->mutex);
	if (sharedHandles->nfreehandles >= sharedHandles->maxhandles)
	{
		SpinLockRelease(&sharedHandles->mutex);
		elog(PANIC, "too many free event handles");
	}
	sharedHandles->handles[sharedHandles->nfreehandles] = latch->event;
	sharedHandles->nfreehandles++;
	SpinLockRelease(&sharedHandles->mutex);

	latch->event = NULL;
}
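The pool here is just a fixed-capacity stack of handles guarded by a spinlock; pushing into an already-full stack means the bookkeeping is corrupt, hence the PANIC. A single-threaded toy version of the same stack discipline (the real pool is shared, so every push and pop happens under sharedHandles->mutex; all names below are made up for illustration):

#include <assert.h>
#include <stdio.h>

#define DEMO_MAXHANDLES 4

static int	demo_handles[DEMO_MAXHANDLES];
static int	demo_nfree = 0;

/* Return a handle to the pool; overflow would mean corrupt bookkeeping. */
static void
pool_put(int handle)
{
	assert(demo_nfree < DEMO_MAXHANDLES);
	demo_handles[demo_nfree++] = handle;
}

/* Take a handle out of the pool. */
static int
pool_get(void)
{
	assert(demo_nfree > 0);
	return demo_handles[--demo_nfree];
}

int
main(void)
{
	pool_put(42);
	printf("handle from pool: %d\n", pool_get());	/* 42 */
	return 0;
}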
Example 6: element_alloc
/*
 * allocate some new elements and link them into the free list
 */
static bool
element_alloc(HTAB *hashp, int nelem)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile HASHHDR *hctlv = hashp->hctl;
	Size		elementSize;
	HASHELEMENT *firstElement;
	HASHELEMENT *tmpElement;
	HASHELEMENT *prevElement;
	int			i;

	/* Each element has a HASHELEMENT header plus user data. */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctlv->entrysize);

	CurrentDynaHashCxt = hashp->hcxt;
	firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
	if (!firstElement)
		return false;

	/* prepare to link all the new entries into the freelist */
	prevElement = NULL;
	tmpElement = firstElement;
	for (i = 0; i < nelem; i++)
	{
		tmpElement->link = prevElement;
		prevElement = tmpElement;
		tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
	}

	/* if partitioned, must lock to touch freeList */
	if (IS_PARTITIONED(hctlv))
		SpinLockAcquire(&hctlv->mutex);

	/* freelist could be nonempty if two backends did this concurrently */
	firstElement->link = hctlv->freeList;
	hctlv->freeList = prevElement;

	if (IS_PARTITIONED(hctlv))
		SpinLockRelease(&hctlv->mutex);

	return true;
}
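The loop carves one big allocation into fixed-size elements, chains them backwards, and then splices the whole chain onto the shared freelist in O(1), so only the splice itself needs the lock. A self-contained sketch of the carve-and-splice (node, freelist_grow, and the sizes are illustrative, not dynahash's types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal node standing in for HASHELEMENT; link chains the freelist. */
typedef struct node
{
	struct node *link;
} node;

/*
 * Carve nelem nodes out of one allocation and splice them onto *freelist,
 * mirroring the linking loop in Example 6 (without the partition lock).
 */
static int
freelist_grow(node **freelist, int nelem, size_t node_size)
{
	char	   *chunk = malloc((size_t) nelem * node_size);
	node	   *prev = NULL;
	int			i;

	if (chunk == NULL)
		return 0;

	for (i = 0; i < nelem; i++)
	{
		node	   *n = (node *) (chunk + (size_t) i * node_size);

		n->link = prev;
		prev = n;
	}

	/* Splice: the first carved node points at the old head; prev is the new head. */
	((node *) chunk)->link = *freelist;
	*freelist = prev;
	return 1;
}

int
main(void)
{
	node	   *freelist = NULL;
	int			n = 0;

	freelist_grow(&freelist, 4, sizeof(node));
	for (node *p = freelist; p != NULL; p = p->link)
		n++;
	printf("freelist length: %d\n", n);		/* 4 */
	return 0;
}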
Example 7: ShmemAlloc
/*
 * ShmemAlloc -- allocate max-aligned chunk from shared memory
 *
 * Assumes ShmemLock and ShmemSegHdr are initialized.
 *
 * Returns: real pointer to memory or NULL if we are out
 *		of space.  Has to return a real pointer in order
 *		to be compatible with malloc().
 */
void *
ShmemAlloc(Size size)
{
	Size		newStart;
	Size		newFree;
	void	   *newSpace;

	/* use volatile pointer to prevent code rearrangement */
	volatile PGShmemHeader *shmemseghdr = ShmemSegHdr;

	/*
	 * ensure all space is adequately aligned.
	 */
	size = MAXALIGN(size);

	Assert(shmemseghdr != NULL);

	SpinLockAcquire(ShmemLock);

	newStart = shmemseghdr->freeoffset;

	/* extra alignment for large requests, since they are probably buffers */
	if (size >= BLCKSZ)
		newStart = BUFFERALIGN(newStart);

	newFree = newStart + size;
	if (newFree <= shmemseghdr->totalsize)
	{
		newSpace = (void *) ((char *) ShmemBase + newStart);
		shmemseghdr->freeoffset = newFree;
	}
	else
		newSpace = NULL;

	SpinLockRelease(ShmemLock);

	if (!newSpace)
		ereport(WARNING,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));

	return newSpace;
}
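ShmemAlloc is a bump allocator: its entire state is one free offset, so allocation reduces to "round up, add, compare". Shared memory allocated this way is never freed. A toy user-space version over a static arena, minus the spinlock and the BLCKSZ special case (DEMO_ALIGN crudely approximates MAXALIGN; all names are made up):

#include <stddef.h>
#include <stdio.h>

#define DEMO_ARENA_SIZE 4096
#define DEMO_ALIGN sizeof(void *)	/* crude stand-in for MAXALIGN */

static char	demo_arena[DEMO_ARENA_SIZE];
static size_t demo_freeoffset = 0;

/* Bump-allocate size bytes, or return NULL when the arena is exhausted. */
static void *
demo_alloc(size_t size)
{
	size_t		newStart = demo_freeoffset;
	size_t		newFree;

	size = (size + DEMO_ALIGN - 1) & ~(DEMO_ALIGN - 1);	/* round up */
	newFree = newStart + size;
	if (newFree > DEMO_ARENA_SIZE)
		return NULL;			/* out of space */

	demo_freeoffset = newFree;
	return demo_arena + newStart;
}

int
main(void)
{
	void	   *a = demo_alloc(100);
	void	   *b = demo_alloc(100);

	printf("a=%p b=%p used=%zu\n", a, b, demo_freeoffset);	/* used=208 on 8-byte alignment */
	return 0;
}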
Example 8: LWLockAssign
/*
 * LWLockAssign - assign a dynamically-allocated LWLock number
 *
 * We interlock this using the same spinlock that is used to protect
 * ShmemAlloc().  Interlocking is not really necessary during postmaster
 * startup, but it is needed if any user-defined code tries to allocate
 * LWLocks after startup.
 */
LWLockId
LWLockAssign(void)
{
	LWLockId	result;

	/* use volatile pointer to prevent code rearrangement */
	volatile int *LWLockCounter;

	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
	SpinLockAcquire(ShmemLock);
	if (LWLockCounter[0] >= LWLockCounter[1])
	{
		SpinLockRelease(ShmemLock);
		elog(ERROR, "no more LWLockIds available");
	}
	result = (LWLockId) (LWLockCounter[0]++);
	SpinLockRelease(ShmemLock);
	return result;
}
Example 9: ReplicationSlotPersist
/*
 * Convert a slot that's marked as RS_EPHEMERAL to a RS_PERSISTENT slot,
 * guaranteeing it will be there after an eventual crash.
 */
void
ReplicationSlotPersist(void)
{
	ReplicationSlot *slot = MyReplicationSlot;

	Assert(slot != NULL);
	Assert(slot->data.persistency != RS_PERSISTENT);

	{
		volatile ReplicationSlot *vslot = slot;

		SpinLockAcquire(&slot->mutex);
		vslot->data.persistency = RS_PERSISTENT;
		SpinLockRelease(&slot->mutex);
	}

	ReplicationSlotMarkDirty();
	ReplicationSlotSave();
}
Example 10: Cache_AddToFreelist
/*
 * Link an entry back in the cache freelist
 *
 * The entry must already be marked as free by the caller.
 */
void
Cache_AddToFreelist(Cache *cache, CacheEntry *entry)
{
	Assert(NULL != cache);
	Assert(NULL != entry);
	CACHE_ASSERT_WIPED(entry);
	Assert(entry->state == CACHE_ENTRY_FREE);

	CacheHdr   *cacheHdr = cache->cacheHdr;

	/* Must lock to touch freeList */
	SpinLockAcquire(&cacheHdr->spinlock);

	entry->nextEntry = cacheHdr->freeList;
	cacheHdr->freeList = entry;
	Cache_UpdatePerfCounter(&cacheHdr->cacheStats.noFreeEntries, 1 /* delta */);

	SpinLockRelease(&cacheHdr->spinlock);
}
Example 11: StrategyFreeBuffer
/*
 * StrategyFreeBuffer: put a buffer on the freelist
 */
void
StrategyFreeBuffer(volatile BufferDesc *buf)
{
	SpinLockAcquire(&StrategyControl->buffer_strategy_lock);

	/*
	 * It is possible that we are told to put something in the freelist that
	 * is already in it; don't screw up the list if so.
	 */
	if (buf->freeNext == FREENEXT_NOT_IN_LIST)
	{
		buf->freeNext = StrategyControl->firstFreeBuffer;
		if (buf->freeNext < 0)
			StrategyControl->lastFreeBuffer = buf->buf_id;
		StrategyControl->firstFreeBuffer = buf->buf_id;
	}

	SpinLockRelease(&StrategyControl->buffer_strategy_lock);
}
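Note the sentinel: only buffers whose freeNext is FREENEXT_NOT_IN_LIST are pushed, which makes the operation idempotent and keeps a redundant free request from corrupting the list. A stripped-down sketch of that guard, index-linked like the buffer freelist (the real code also maintains lastFreeBuffer and holds the strategy lock; demo names are illustrative):

#include <stdio.h>

#define DEMO_NBUFFERS 4
#define FREENEXT_NOT_IN_LIST (-2)
#define FREENEXT_END_OF_LIST (-1)

static int	demo_freeNext[DEMO_NBUFFERS];
static int	demo_firstFree = FREENEXT_END_OF_LIST;

/* Push buf_id onto the freelist unless it is already a member. */
static void
demo_free_buffer(int buf_id)
{
	if (demo_freeNext[buf_id] != FREENEXT_NOT_IN_LIST)
		return;					/* already on the list: leave it alone */
	demo_freeNext[buf_id] = demo_firstFree;
	demo_firstFree = buf_id;
}

int
main(void)
{
	for (int i = 0; i < DEMO_NBUFFERS; i++)
		demo_freeNext[i] = FREENEXT_NOT_IN_LIST;

	demo_free_buffer(2);
	demo_free_buffer(2);		/* second call is a harmless no-op */
	printf("head=%d next=%d\n", demo_firstFree, demo_freeNext[2]);	/* 2, -1 */
	return 0;
}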
Example 12: shm_toc_freespace
/*
 * Return the number of bytes that can still be allocated.
 */
extern Size
shm_toc_freespace(shm_toc *toc)
{
	volatile shm_toc *vtoc = toc;
	Size		total_bytes;
	Size		allocated_bytes;
	Size		nentry;
	Size		toc_bytes;

	SpinLockAcquire(&toc->toc_mutex);
	total_bytes = vtoc->toc_total_bytes;
	allocated_bytes = vtoc->toc_allocated_bytes;
	nentry = vtoc->toc_nentry;
	SpinLockRelease(&toc->toc_mutex);

	toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry);
	Assert(allocated_bytes + BUFFERALIGN(toc_bytes) <= total_bytes);
	return total_bytes - (allocated_bytes + BUFFERALIGN(toc_bytes));
}
Example 13: GetBackendDataForProc
/*
 * GetBackendDataForProc writes the backend data for the given process to
 * result.  If the process is part of a lock group (parallel query), it
 * returns the leader's data instead.
 */
void
GetBackendDataForProc(PGPROC *proc, BackendData *result)
{
	BackendData *backendData = NULL;
	int			pgprocno = proc->pgprocno;

	if (proc->lockGroupLeader != NULL)
	{
		pgprocno = proc->lockGroupLeader->pgprocno;
	}

	backendData = &backendManagementShmemData->backends[pgprocno];

	SpinLockAcquire(&backendData->mutex);
	memcpy(result, backendData, sizeof(BackendData));
	SpinLockRelease(&backendData->mutex);
}
Example 14: start_group
static void
start_group(ContQueryProcGroup *grp)
{
	int			slot_idx;
	int			group_id;

	SpinLockInit(&grp->mutex);
	SpinLockAcquire(&grp->mutex);

	grp->active = true;
	grp->terminate = false;

	/* Start workers */
	for (slot_idx = 0, group_id = 0; slot_idx < continuous_query_num_workers; slot_idx++, group_id++)
	{
		ContQueryProc *proc = &grp->procs[slot_idx];

		MemSet(proc, 0, sizeof(ContQueryProc));
		proc->type = Worker;
		proc->group_id = group_id;
		proc->group = grp;

		run_background_proc(proc);
	}

	/* Start combiners; slot_idx continues where the worker loop left off */
	for (group_id = 0; slot_idx < TOTAL_SLOTS; slot_idx++, group_id++)
	{
		ContQueryProc *proc = &grp->procs[slot_idx];

		MemSet(proc, 0, sizeof(ContQueryProc));
		proc->type = Combiner;
		proc->group_id = group_id;
		proc->group = grp;

		run_background_proc(proc);
	}

	SpinLockRelease(&grp->mutex);
}
Example 15: ShmemAllocNoError
/*
 * ShmemAllocNoError -- allocate max-aligned chunk from shared memory
 *
 * As ShmemAlloc, but returns NULL if out of space, rather than erroring.
 */
void *
ShmemAllocNoError(Size size)
{
	Size		newStart;
	Size		newFree;
	void	   *newSpace;

	/*
	 * Ensure all space is adequately aligned.  We used to only MAXALIGN this
	 * space but experience has proved that on modern systems that is not
	 * good enough.  Many parts of the system are very sensitive to critical
	 * data structures getting split across cache line boundaries.  To avoid
	 * that, attempt to align the beginning of the allocation to a cache line
	 * boundary.  The calling code will still need to be careful about how it
	 * uses the allocated space - e.g. by padding each element in an array of
	 * structures out to a power-of-two size - but without this, even that
	 * won't be sufficient.
	 */
	size = CACHELINEALIGN(size);

	Assert(ShmemSegHdr != NULL);

	SpinLockAcquire(ShmemLock);

	newStart = ShmemSegHdr->freeoffset;

	newFree = newStart + size;
	if (newFree <= ShmemSegHdr->totalsize)
	{
		newSpace = (void *) ((char *) ShmemBase + newStart);
		ShmemSegHdr->freeoffset = newFree;
	}
	else
		newSpace = NULL;

	SpinLockRelease(ShmemLock);

	/* note this assert is okay with newSpace == NULL */
	Assert(newSpace == (void *) CACHELINEALIGN(newSpace));

	return newSpace;
}
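CACHELINEALIGN is the usual round-up-to-a-power-of-two trick: add the alignment minus one, then mask off the low bits. A stand-alone check of the arithmetic (the 128-byte figure is an assumption standing in for PG_CACHE_LINE_SIZE; DEMO_ALIGN_UP mirrors the shape of PostgreSQL's TYPEALIGN macro):

#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHE_LINE 128
#define DEMO_ALIGN_UP(len, a) \
	(((uintptr_t) (len) + ((a) - 1)) & ~((uintptr_t) ((a) - 1)))

int
main(void)
{
	/* 200 rounds up to 256; exact multiples are left alone. */
	printf("%lu %lu\n",
		   (unsigned long) DEMO_ALIGN_UP(200, DEMO_CACHE_LINE),
		   (unsigned long) DEMO_ALIGN_UP(256, DEMO_CACHE_LINE));
	return 0;
}

The trade-off relative to plain MAXALIGN is deliberate: cache-line-aligning every chunk wastes a few bytes per allocation but keeps hot shared structures from straddling cache lines, which the comment in the function explains at length.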
}