This article collects typical usage examples of the SpinLockInit function in C/C++. If you are wondering what exactly SpinLockInit does, how to call it, and what it looks like in real code, the curated examples below should help.
Fifteen code examples are shown, sorted by popularity by default; most of them come from the PostgreSQL source tree.
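Before the examples, here is the basic contract: SpinLockInit puts an slock_t into the unlocked state, and it must run exactly once, in the process that creates the shared structure, before any process calls SpinLockAcquire on it. A minimal sketch of the whole life cycle (MySharedState and its functions are made-up names; in PostgreSQL the struct would come from ShmemInitStruct):

#include "storage/spin.h"

typedef struct MySharedState
{
    slock_t     mutex;          /* protects the field below */
    int         counter;
} MySharedState;

static MySharedState *state;    /* points into shared memory */

/* Run once, at shared-memory creation time */
void
MySharedStateInit(void)
{
    SpinLockInit(&state->mutex);
    state->counter = 0;
}

/* Any backend, afterwards: keep the critical section short */
int
MySharedStateBump(void)
{
    int         val;

    SpinLockAcquire(&state->mutex);
    val = ++state->counter;
    SpinLockRelease(&state->mutex);
    return val;
}

Every example below follows this pattern: an init function that runs once sprinkles SpinLockInit calls over freshly allocated shared structures.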
Example 1: CreateLWLocks
/*
 * Allocate shmem space for LWLocks and initialize the locks.
 */
void
CreateLWLocks(void)
{
    int         numLocks = NumLWLocks();
    uint32      spaceLocks = LWLockShmemSize();
    LWLock     *lock;
    int         id;

    /* Allocate space */
    LWLockArray = (LWLock *) ShmemAlloc(spaceLocks);

    /*
     * Initialize all LWLocks to "unlocked" state
     */
    for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
    {
        SpinLockInit(&lock->mutex);
        lock->releaseOK = true;
        lock->exclusive = 0;
        lock->shared = 0;
        lock->head = NULL;
        lock->tail = NULL;
    }

    /*
     * Initialize the dynamic-allocation counter at the end of the array
     */
    LWLockCounter = (int *) lock;
    LWLockCounter[0] = (int) NumFixedLWLocks;
    LWLockCounter[1] = numLocks;
}
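The two counters stored just past the lock array drive run-time lock assignment. As a rough sketch of the consumer (modeled on the LWLockAssign routine from the same PostgreSQL era; details vary between versions), a dynamic lock is handed out by bumping LWLockCounter[0] under the ShmemLock spinlock:

LWLockId
LWLockAssign(void)
{
    LWLockId    result;

    /* ShmemLock serializes concurrent assignment of lock IDs */
    SpinLockAcquire(ShmemLock);
    if (LWLockCounter[0] >= LWLockCounter[1])
    {
        SpinLockRelease(ShmemLock);
        elog(ERROR, "no more LWLockIds available");
    }
    result = (LWLockId) (LWLockCounter[0]++);
    SpinLockRelease(ShmemLock);
    return result;
}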
Example 2: ApplyLauncherShmemInit
/*
 * ApplyLauncherShmemInit
 *      Allocate and initialize replication launcher shared memory
 */
void
ApplyLauncherShmemInit(void)
{
    bool        found;

    LogicalRepCtx = (LogicalRepCtxStruct *)
        ShmemInitStruct("Logical Replication Launcher Data",
                        ApplyLauncherShmemSize(),
                        &found);

    if (!found)
    {
        int         slot;

        memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());

        /* Initialize memory and spin locks for each worker slot. */
        for (slot = 0; slot < max_logical_replication_workers; slot++)
        {
            LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];

            memset(worker, 0, sizeof(LogicalRepWorker));
            SpinLockInit(&worker->relmutex);
        }
    }
}
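Each worker's relmutex later guards the fields describing the table currently being synchronized. A minimal sketch of a writer (the helper name is hypothetical; relstate is a real LogicalRepWorker field, but treat the exact logic as illustrative):

/* Hypothetical helper: publish a new table-sync state for this worker */
static void
worker_set_relstate(LogicalRepWorker *worker, char state)
{
    SpinLockAcquire(&worker->relmutex);
    worker->relstate = state;
    SpinLockRelease(&worker->relmutex);
}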
Example 3: ReplicationSlotsShmemInit
/*
 * Allocate and initialize walsender-related shared memory.
 */
void
ReplicationSlotsShmemInit(void)
{
    bool        found;

    if (max_replication_slots == 0)
        return;

    ReplicationSlotCtl = (ReplicationSlotCtlData *)
        ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
                        &found);

    LWLockRegisterTranche(LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
                          "replication_slot_io");

    if (!found)
    {
        int         i;

        /* First time through, so initialize */
        MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

        for (i = 0; i < max_replication_slots; i++)
        {
            ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

            /* everything else is zeroed by the memset above */
            SpinLockInit(&slot->mutex);
            LWLockInitialize(&slot->io_in_progress_lock,
                             LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
            ConditionVariableInit(&slot->active_cv);
        }
    }
}
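Once initialized, slot->mutex protects the slot's in-memory state; readers and writers take it only around individual field accesses. A hedged sketch of claiming a slot (active_pid is a real ReplicationSlot field in recent PostgreSQL, but the surrounding logic here is simplified):

/* Sketch: claim an initialized slot for this backend */
SpinLockAcquire(&slot->mutex);
if (slot->active_pid == 0)
    slot->active_pid = MyProcPid;
SpinLockRelease(&slot->mutex);

/* wake anyone waiting on the slot's condition variable */
ConditionVariableBroadcast(&slot->active_cv);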
Example 4: ReplicationSlotsShmemInit (older variant)
/*
 * Allocate and initialize walsender-related shared memory.
 */
void
ReplicationSlotsShmemInit(void)
{
    bool        found;

    if (max_replication_slots == 0)
        return;

    ReplicationSlotCtl = (ReplicationSlotCtlData *)
        ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
                        &found);

    if (!found)
    {
        int         i;

        /* First time through, so initialize */
        MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

        for (i = 0; i < max_replication_slots; i++)
        {
            ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

            /* everything else is zeroed by the memset above */
            SpinLockInit(&slot->mutex);
            slot->io_in_progress_lock = LWLockAssign();
        }
    }
}
Example 5: LatchShmemInit
/*
 * LatchShmemInit
 *      Allocate and initialize shared memory needed for latches
 */
void
LatchShmemInit(void)
{
    Size        size = LatchShmemSize();
    bool        found;

    sharedHandles = ShmemInitStruct("SharedEventHandles", size, &found);

    /* If we're first, initialize the struct and allocate handles */
    if (!found)
    {
        int         i;
        SECURITY_ATTRIBUTES sa;

        /*
         * Set up security attributes to specify that the events are
         * inherited.
         */
        ZeroMemory(&sa, sizeof(sa));
        sa.nLength = sizeof(sa);
        sa.bInheritHandle = TRUE;

        SpinLockInit(&sharedHandles->mutex);
        sharedHandles->maxhandles = NumSharedLatches();
        sharedHandles->nfreehandles = sharedHandles->maxhandles;

        for (i = 0; i < sharedHandles->maxhandles; i++)
        {
            sharedHandles->handles[i] = CreateEvent(&sa, TRUE, FALSE, NULL);
            if (sharedHandles->handles[i] == NULL)
                elog(ERROR, "CreateEvent failed: error code %d",
                     (int) GetLastError());
        }
    }
}
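The event-handle pool built here is consumed whenever a shared latch is initialized on Windows: a handle is popped from the free list under the spinlock. A rough sketch of that consumer (hypothetical helper name; the real logic lives in the Windows latch implementation and may differ in detail):

/* Hypothetical helper: take one event handle from the shared pool */
static HANDLE
get_shared_handle(void)
{
    HANDLE      h;

    SpinLockAcquire(&sharedHandles->mutex);
    if (sharedHandles->nfreehandles <= 0)
    {
        SpinLockRelease(&sharedHandles->mutex);
        elog(ERROR, "out of shared event handles");
    }
    sharedHandles->nfreehandles--;
    h = sharedHandles->handles[sharedHandles->nfreehandles];
    SpinLockRelease(&sharedHandles->mutex);
    return h;
}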
Example 6: startup_hacks
/*
 * Place platform-specific startup hacks here.  This is the right
 * place to put code that must be executed early in the launch of any new
 * server process.  Note that this code will NOT be executed when a backend
 * or sub-bootstrap process is forked, unless we are in a fork/exec
 * environment (ie EXEC_BACKEND is defined).
 *
 * XXX The need for code here is proof that the platform in question
 * is too brain-dead to provide a standard C execution environment
 * without help.  Avoid adding more here, if you can.
 */
static void
startup_hacks(const char *progname)
{
    /*
     * Windows-specific execution environment hacking.
     */
#ifdef WIN32
    {
        WSADATA     wsaData;
        int         err;

        /* Make output streams unbuffered by default */
        setvbuf(stdout, NULL, _IONBF, 0);
        setvbuf(stderr, NULL, _IONBF, 0);

        /* Prepare Winsock */
        err = WSAStartup(MAKEWORD(2, 2), &wsaData);
        if (err != 0)
        {
            write_stderr("%s: WSAStartup failed: %d\n",
                         progname, err);
            exit(1);
        }

        /* In case of general protection fault, don't show GUI popup box */
        SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
    }
#endif                          /* WIN32 */

    /*
     * Initialize dummy_spinlock, in case we are on a platform where we have
     * to use the fallback implementation of pg_memory_barrier().
     */
    SpinLockInit(&dummy_spinlock);
}
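The reason dummy_spinlock must be initialized here is the barrier fallback: on platforms with no native fence instruction, PostgreSQL implements pg_memory_barrier() by acquiring and immediately releasing a spinlock, since the lock and unlock operations imply the required memory ordering. Roughly (the exact macro name and form vary by PostgreSQL version; this is a sketch):

/* Fallback when no compiler/CPU barrier primitive is available */
#define pg_memory_barrier() \
    do { S_LOCK(&dummy_spinlock); S_UNLOCK(&dummy_spinlock); } while (0)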
Example 7: CreateSharedInvalidationState
/*
 * CreateSharedInvalidationState
 *      Create and initialize the SI message buffer
 */
void
CreateSharedInvalidationState(void)
{
    int         i;
    bool        found;

    /* Allocate space in shared memory */
    shmInvalBuffer = (SISeg *)
        ShmemInitStruct("shmInvalBuffer", SInvalShmemSize(), &found);
    if (found)
        return;

    /* Clear message counters, save size of procState array, init spinlock */
    shmInvalBuffer->minMsgNum = 0;
    shmInvalBuffer->maxMsgNum = 0;
    shmInvalBuffer->nextThreshold = CLEANUP_MIN;
    shmInvalBuffer->lastBackend = 0;
    shmInvalBuffer->maxBackends = MaxBackends;
    SpinLockInit(&shmInvalBuffer->msgnumLock);

    /* The buffer[] array is initially all unused, so we need not fill it */

    /* Mark all backends inactive, and initialize nextLXID */
    for (i = 0; i < shmInvalBuffer->maxBackends; i++)
    {
        shmInvalBuffer->procState[i].procPid = 0;       /* inactive */
        shmInvalBuffer->procState[i].proc = NULL;
        shmInvalBuffer->procState[i].nextMsgNum = 0;    /* meaningless */
        shmInvalBuffer->procState[i].resetState = false;
        shmInvalBuffer->procState[i].signaled = false;
        shmInvalBuffer->procState[i].hasMessages = false;
        shmInvalBuffer->procState[i].nextLXID = InvalidLocalTransactionId;
    }
}
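msgnumLock exists because minMsgNum and maxMsgNum advance constantly and must be readable without taking the heavier sinval locks. A hedged sketch of the read side:

int         curmax;

/* Sketch: read the advancing message counter consistently */
SpinLockAcquire(&shmInvalBuffer->msgnumLock);
curmax = shmInvalBuffer->maxMsgNum;
SpinLockRelease(&shmInvalBuffer->msgnumLock);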
Example 8: SharedFileSetInit
/*
 * Initialize a space for temporary files that can be opened for read-only
 * access by other backends.  Other backends must attach to it before
 * accessing it.  Associate this SharedFileSet with 'seg'.  Any contained
 * files will be deleted when the last backend detaches.
 *
 * Files will be distributed over the tablespaces configured in
 * temp_tablespaces.
 *
 * Under the covers the set is one or more directories which will eventually
 * be deleted when there are no backends attached.
 */
void
SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg)
{
    static uint32 counter = 0;

    SpinLockInit(&fileset->mutex);
    fileset->refcnt = 1;
    fileset->creator_pid = MyProcPid;
    fileset->number = counter;
    counter = (counter + 1) % INT_MAX;

    /* Capture the tablespace OIDs so that all backends agree on them. */
    PrepareTempTablespaces();
    fileset->ntablespaces =
        GetTempTablespaces(&fileset->tablespaces[0],
                           lengthof(fileset->tablespaces));
    if (fileset->ntablespaces == 0)
    {
        fileset->tablespaces[0] = DEFAULTTABLESPACE_OID;
        fileset->ntablespaces = 1;
    }

    /* Register our cleanup callback. */
    on_dsm_detach(seg, SharedFileSetOnDetach, PointerGetDatum(fileset));
}
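The matching attach operation bumps refcnt under the same mutex, and refuses to attach once the count has already fallen to zero (meaning the files are being deleted). A sketch modeled on SharedFileSetAttach; details may differ across PostgreSQL versions:

void
SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg)
{
    bool        success;

    SpinLockAcquire(&fileset->mutex);
    if (fileset->refcnt == 0)
        success = false;        /* last backend already detached */
    else
    {
        ++fileset->refcnt;
        success = true;
    }
    SpinLockRelease(&fileset->mutex);

    if (!success)
        elog(ERROR, "could not attach to a SharedFileSet that is already gone");

    /* Register the same cleanup callback as the creator */
    on_dsm_detach(seg, SharedFileSetOnDetach, PointerGetDatum(fileset));
}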
Example 9: entry_alloc
/*
 * Allocate a new hashtable entry.
 * caller must hold an exclusive lock on pgss->lock
 *
 * Note: despite needing exclusive lock, it's not an error for the target
 * entry to already exist.  This is because pgss_store releases and
 * reacquires lock after failing to find a match; so someone else could
 * have made the entry while we waited to get exclusive lock.
 */
static pgssEntry *
entry_alloc(pgssHashKey *key)
{
    pgssEntry  *entry;
    bool        found;

    /* Caller must have clipped query properly */
    Assert(key->query_len < pgss->query_size);

    /* Make space if needed */
    while (hash_get_num_entries(pgss_hash) >= pgss_max)
        entry_dealloc();

    /* Find or create an entry with desired hash code */
    entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);

    if (!found)
    {
        /* New entry, initialize it */

        /* dynahash tried to copy the key for us, but must fix query_ptr */
        entry->key.query_ptr = entry->query;
        /* reset the statistics */
        memset(&entry->counters, 0, sizeof(Counters));
        entry->counters.usage = USAGE_INIT;
        /* re-initialize the mutex each time ... we assume no one using it */
        SpinLockInit(&entry->mutex);
        /* ... and don't forget the query text */
        memcpy(entry->query, key->query_ptr, key->query_len);
        entry->query[key->query_len] = '\0';
    }

    return entry;
}
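The per-entry mutex initialized here is what lets pg_stat_statements update an entry's counters while holding only a shared lock on the hash table. A sketch of the update pattern (the helper name is hypothetical; the volatile-pointer idiom and the Counters fields follow pg_stat_statements, but treat exact fields as illustrative):

/* Hypothetical helper: fold one execution into an entry's statistics */
static void
pgss_update_counters(pgssEntry *entry, double total_time, int64 rows)
{
    /* volatile qualifier keeps the compiler from reordering the stores */
    volatile pgssEntry *e = (volatile pgssEntry *) entry;

    SpinLockAcquire(&e->mutex);
    e->counters.calls += 1;
    e->counters.total_time += total_time;
    e->counters.rows += rows;
    SpinLockRelease(&e->mutex);
}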
Example 10: StreamBatchCreate
StreamBatch *
StreamBatchCreate(Bitmapset *readers, int num_tuples)
{
    char       *ptr = ShmemDynAlloc0(sizeof(StreamBatch) +
                                     BITMAPSET_SIZE(readers->nwords) +
                                     (bms_num_members(readers) * sizeof(int)));
    StreamBatch *batch = (StreamBatch *) ptr;
    int         cq_id;
    int         i = 0;

    batch->id = rand() ^ (int) MyProcPid;
    batch->num_tups = num_tuples;
    batch->num_wtups = bms_num_members(readers) * num_tuples;
    SpinLockInit(&batch->mutex);

    ptr += sizeof(StreamBatch);
    batch->readers = (Bitmapset *) ptr;
    memcpy(batch->readers, readers, BITMAPSET_SIZE(readers->nwords));

    ptr += BITMAPSET_SIZE(readers->nwords);
    batch->proc_runs = (int *) ptr;

    readers = bms_copy(readers);
    while ((cq_id = bms_first_member(readers)) != -1)
    {
        CQProcEntry *pentry = GetCQProcEntry(cq_id);

        batch->proc_runs[i] = Max(pentry->proc_runs, pentry->pg_size);
        i++;
    }
    pfree(readers);

    return batch;
}
Example 11: InitShmemAllocation
/*
 * InitShmemAllocation() --- set up shared-memory space allocation.
 *
 * This should be called only in the postmaster or a standalone backend.
 */
void
InitShmemAllocation(void)
{
    PGShmemHeader *shmhdr = ShmemSegHdr;

    Assert(shmhdr != NULL);

    /*
     * Initialize the spinlock used by ShmemAlloc.  We have to do the space
     * allocation the hard way, since obviously ShmemAlloc can't be called
     * yet.
     */
    ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
    shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
    Assert(shmhdr->freeoffset <= shmhdr->totalsize);

    SpinLockInit(ShmemLock);

    /* ShmemIndex can't be set up yet (need LWLocks first) */
    shmhdr->index = NULL;
    ShmemIndex = (HTAB *) NULL;

    /*
     * Initialize ShmemVariableCache for transaction manager.  (This doesn't
     * really belong here, but not worth moving.)
     */
    ShmemVariableCache = (VariableCache)
        ShmemAlloc(sizeof(*ShmemVariableCache));
    memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
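After this point, every ShmemAlloc call serializes on the ShmemLock spinlock created above. A simplified sketch of what the allocator does under the hood (hypothetical function name; the real ShmemAlloc adds more alignment and bookkeeping):

/* Sketch of ShmemAlloc's core: bump freeoffset under ShmemLock */
static void *
shmem_alloc_sketch(Size size)
{
    Size        newStart;
    void       *result = NULL;

    size = MAXALIGN(size);

    SpinLockAcquire(ShmemLock);
    newStart = ShmemSegHdr->freeoffset;
    if (newStart + size <= ShmemSegHdr->totalsize)
    {
        result = ((char *) ShmemSegHdr) + newStart;
        ShmemSegHdr->freeoffset = newStart + size;
    }
    SpinLockRelease(ShmemLock);

    if (result == NULL)
        elog(ERROR, "out of shared memory");
    return result;
}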
Example 12: WalSndShmemInit
/* Allocate and initialize walsender-related shared memory */
void
WalSndShmemInit(void)
{
    bool        found;
    int         i;

    WalSndCtl = (WalSndCtlData *)
        ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);

    if (!found)
    {
        /* First time through, so initialize */
        MemSet(WalSndCtl, 0, WalSndShmemSize());

        SHMQueueInit(&(WalSndCtl->SyncRepQueue));

        for (i = 0; i < max_wal_senders; i++)
        {
            WalSnd     *walsnd = &WalSndCtl->walsnds[i];

            SpinLockInit(&walsnd->mutex);
            InitSharedLatch(&walsnd->latch);
        }
    }
}
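Each walsnd->mutex subsequently guards that sender's progress fields, so that other backends can read replication progress safely. Updates follow the usual short-critical-section pattern (sentPtr is a real WalSnd field; the surrounding code is a sketch):

/* Sketch: publish this walsender's send position */
SpinLockAcquire(&walsnd->mutex);
walsnd->sentPtr = sentPtr;
SpinLockRelease(&walsnd->mutex);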
Example 13: MulticastInitialize
FSTATUS
MulticastInitialize(void)
{
    FSTATUS     Status;

    _DBG_ENTER_LVL(_DBG_LVL_FUNC_TRACE, InitializeMulticast);

    McSdHandle = NULL;

    QListInit(&MasterMcGroupList);
    QListInit(&MasterMcClientList);

    SpinLockInitState(&MulticastLock);
    SpinLockInit(&MulticastLock);

    TimerInitState(&MaintenanceTimer);
    TimerInit(&MaintenanceTimer, McMaintenance, NULL);
    MaintenanceTimerActivated = FALSE;

    Status = iba_sd_register(&McSdHandle, NULL);
    if (Status != FSUCCESS)
    {
        McSdHandle = NULL;
        _DBG_ERROR(("Multicast Module Not Able To Register With Subnet Driver "
                    "Status = %d.\n", Status));
    }

    _DBG_LEAVE_LVL(_DBG_LVL_FUNC_TRACE);
    return Status;
}
Example 14: ExecBitmapHeapInitializeDSM
/* ----------------------------------------------------------------
 *      ExecBitmapHeapInitializeDSM
 *
 *      Set up a parallel bitmap heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
                            ParallelContext *pcxt)
{
    ParallelBitmapHeapState *pstate;
    EState     *estate = node->ss.ps.state;
    dsa_area   *dsa = node->ss.ps.state->es_query_dsa;

    /* If there's no DSA, there are no workers; initialize nothing. */
    if (dsa == NULL)
        return;

    pstate = shm_toc_allocate(pcxt->toc, node->pscan_len);

    pstate->tbmiterator = 0;
    pstate->prefetch_iterator = 0;

    /* Initialize the mutex */
    SpinLockInit(&pstate->mutex);
    pstate->prefetch_pages = 0;
    pstate->prefetch_target = 0;
    pstate->state = BM_INITIAL;

    ConditionVariableInit(&pstate->cv);
    SerializeSnapshot(estate->es_snapshot, pstate->phs_snapshot_data);

    shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pstate);
    node->pstate = pstate;
}
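The mirror image on the worker side looks up the same struct in the shared table of contents and adopts it; the mutex and condition variable initialized above then coordinate which participant builds the bitmap. A simplified sketch modeled on ExecBitmapHeapInitializeWorker (the real function also restores the serialized snapshot; version details vary):

void
ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node,
                               ParallelWorkerContext *pwcxt)
{
    ParallelBitmapHeapState *pstate;

    /* Find the shared state the leader published under our plan node id */
    pstate = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
    node->pstate = pstate;
}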
Example 15: InitBufferPool
/*
 * Initialize shared buffer pool
 *
 * This is called once during shared-memory initialization (either in the
 * postmaster, or in a standalone backend).
 */
void
InitBufferPool(void)
{
    bool        foundBufs,
                foundDescs;

    BufferDescriptors = (BufferDesc *)
        ShmemInitStruct("Buffer Descriptors",
                        NBuffers * sizeof(BufferDesc), &foundDescs);

    BufferBlocks = (char *)
        ShmemInitStruct("Buffer Blocks",
                        NBuffers * (Size) BLCKSZ, &foundBufs);

    if (foundDescs || foundBufs)
    {
        /* both should be present or neither */
        Assert(foundDescs && foundBufs);
        /* note: this path is only taken in EXEC_BACKEND case */
    }
    else
    {
        BufferDesc *buf;
        int         i;

        buf = BufferDescriptors;

        /*
         * Initialize all the buffer headers.
         */
        for (i = 0; i < NBuffers; buf++, i++)
        {
            CLEAR_BUFFERTAG(buf->tag);
            buf->flags = 0;
            buf->usage_count = 0;
            buf->refcount = 0;
            buf->wait_backend_pid = 0;

            SpinLockInit(&buf->buf_hdr_lock);

            buf->buf_id = i;

            /*
             * Initially link all the buffers together as unused. Subsequent
             * management of this list is done by freelist.c.
             */
            buf->freeNext = i + 1;

            buf->io_in_progress_lock = LWLockAssign();
            buf->content_lock = LWLockAssign();
        }

        /* Correct last entry of linked list */
        BufferDescriptors[NBuffers - 1].freeNext = FREENEXT_END_OF_LIST;
    }

    /* Init other shared buffer-management stuff */
    StrategyInitialize(!foundDescs);
}
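In this era of the code, every change to a buffer header's flags or counts went through buf_hdr_lock (later PostgreSQL versions replaced the spinlock with an atomic state word). The pin operation, roughly, as a hedged sketch:

/* Sketch: bump a buffer's reference count under its header spinlock */
SpinLockAcquire(&buf->buf_hdr_lock);
buf->refcount++;
SpinLockRelease(&buf->buf_hdr_lock);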