本文整理汇总了C++中cv_init函数的典型用法代码示例。如果您正苦于以下问题:C++ cv_init函数的具体用法?C++ cv_init怎么用?C++ cv_init使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cv_init函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: idm_conn_sm_init
/*
 * Initialize the connection state machine for an idm connection.
 *
 * Creates a single-threaded taskq (named from the caller-assigned
 * connection ID) to run state transitions, and initializes the audit
 * buffer, state mutex/condvar and initial state.
 *
 * Returns IDM_STATUS_SUCCESS, or IDM_STATUS_FAIL if the taskq cannot
 * be created.
 */
idm_status_t
idm_conn_sm_init(idm_conn_t *ic)
{
	char taskq_name[32];

	/*
	 * Caller should have assigned a unique connection ID. Use this
	 * connection ID to create a unique connection name string.
	 */
	ASSERT(ic->ic_internal_cid != 0);
	/*
	 * snprintf() always NUL-terminates, so pass the full buffer size;
	 * the previous "sizeof - 1" needlessly sacrificed one byte.
	 */
	(void) snprintf(taskq_name, sizeof (taskq_name), "conn_sm%08x",
	    ic->ic_internal_cid);

	/* One worker thread is enough: transitions must run in order. */
	ic->ic_state_taskq = taskq_create(taskq_name, 1, minclsyspri, 4, 16384,
	    TASKQ_PREPOPULATE);
	if (ic->ic_state_taskq == NULL) {
		return (IDM_STATUS_FAIL);
	}

	idm_sm_audit_init(&ic->ic_state_audit);
	mutex_init(&ic->ic_state_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ic->ic_state_cv, NULL, CV_DEFAULT, NULL);

	/* All connections start life in the FREE state. */
	ic->ic_state = CS_S1_FREE;
	ic->ic_last_state = CS_S1_FREE;

	return (IDM_STATUS_SUCCESS);
}
示例2: cnread
/* ARGSUSED */
static int
cnread(dev_t dev, struct uio *uio, struct cred *cred)
{
kcondvar_t sleep_forever;
kmutex_t sleep_forever_mutex;
if (rconsvp == NULL) {
/*
* Go to sleep forever. This seems like the least
* harmful thing to do if there's no console.
* EOF might be better if we're ending up single-user
* mode.
*/
cv_init(&sleep_forever, NULL, CV_DRIVER, NULL);
mutex_init(&sleep_forever_mutex, NULL, MUTEX_DRIVER, NULL);
mutex_enter(&sleep_forever_mutex);
(void) cv_wait_sig(&sleep_forever, &sleep_forever_mutex);
mutex_exit(&sleep_forever_mutex);
return (EIO);
}
if (rconsvp->v_stream != NULL)
return (strread(rconsvp, uio, cred));
else
return (cdev_read(rconsdev, uio, cred));
}
示例3: testcall
/*
 * Kernel test entry point: spawn NTHREADS worker threads, then sleep
 * on test_cv until they have all decremented test_count to zero.
 */
int
testcall(struct lwp *l, void *uap, register_t *retval)
{
	int n;

	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&test_cv, "testcv");

	printf("test: creating threads\n");
	test_count = NTHREADS;
	test_exit = 0;
	for (n = 0; n < test_count; n++)
		kthread_create(0, KTHREAD_MPSAFE, NULL, thread1, &primes[n],
		    &test_threads[n], "thread%d", n);

	printf("test: sleeping\n");
	mutex_enter(&test_mutex);
	while (test_count != 0) {
		/* Wake periodically; workers see test_exit and wind down. */
		(void)cv_timedwait(&test_cv, &test_mutex, hz * SECONDS);
		test_exit = 1;
	}
	mutex_exit(&test_mutex);

	printf("test: finished\n");
	cv_destroy(&test_cv);
	mutex_destroy(&test_mutex);
	return 0;
}
示例4: fr_loginit
/* ------------------------------------------------------------------------ */
int fr_loginit()
{
int i;
for (i = IPL_LOGMAX; i >= 0; i--) {
iplt[i] = NULL;
ipll[i] = NULL;
iplh[i] = &iplt[i];
iplused[i] = 0;
bzero((char *)&iplcrc[i], sizeof(iplcrc[i]));
# ifdef IPL_SELECT
iplog_ss[i].read_waiter = 0;
iplog_ss[i].state = 0;
# endif
# if defined(linux) && defined(_KERNEL)
init_waitqueue_head(iplh_linux + i);
# endif
}
# if SOLARIS && defined(_KERNEL)
cv_init(&iplwait, "ipl condvar", CV_DRIVER, NULL);
# endif
MUTEX_INIT(&ipl_mutex, "ipf log mutex");
ipl_log_init = 1;
return 0;
}
示例5: log_event_init
/*
* log_event_init - Allocate and initialize log_event data structures.
*/
void
log_event_init()
{
mutex_init(&eventq_head_mutex, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&eventq_sent_mutex, NULL, MUTEX_DEFAULT, NULL);
cv_init(&log_event_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&event_qfull_mutex, NULL, MUTEX_DEFAULT, NULL);
cv_init(&event_qfull_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&event_pause_mutex, NULL, MUTEX_DEFAULT, NULL);
cv_init(&event_pause_cv, NULL, CV_DEFAULT, NULL);
mutex_init(®istered_channel_mutex, NULL, MUTEX_DEFAULT, NULL);
sysevent_evc_init();
}
示例6: init_completion
/*
* Completion API
*/
void
init_completion(struct completion *c)
{
cv_init(&c->cv, "VCHI completion cv");
mtx_init(&c->lock, "VCHI completion lock", "condvar", MTX_DEF);
c->done = 0;
}
示例7: dsl_pool_open_impl
/*
 * Allocate and wire up a dsl_pool_t for the given spa at the given txg.
 * Sets up the config rwlock, txg machinery, the per-txg dirty lists and
 * the space-availability synchronization, plus the vnode-release taskq.
 */
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	blkptr_t *rootbp = spa_get_rootblkptr(spa);
	dsl_pool_t *pool;

	pool = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	pool->dp_spa = spa;
	pool->dp_meta_rootbp = *rootbp;
	rrw_init(&pool->dp_config_rwlock, B_TRUE);
	txg_init(pool, txg);

	/* Per-txg lists of objects with dirty state to sync. */
	txg_list_create(&pool->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&pool->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&pool->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&pool->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&pool->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pool->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	/* Single-threaded taskq used to defer vnode releases. */
	pool->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (pool);
}
示例8: scsipi_alloc_periph
/*
* allocate and init a scsipi_periph structure for a new device.
*/
struct scsipi_periph *
scsipi_alloc_periph(int malloc_flag)
{
struct scsipi_periph *periph;
u_int i;
periph = malloc(sizeof(*periph), M_DEVBUF, malloc_flag|M_ZERO);
if (periph == NULL)
return NULL;
periph->periph_dev = NULL;
/*
* Start with one command opening. The periph driver
* will grow this if it knows it can take advantage of it.
*/
periph->periph_openings = 1;
periph->periph_active = 0;
for (i = 0; i < PERIPH_NTAGWORDS; i++)
periph->periph_freetags[i] = 0xffffffff;
TAILQ_INIT(&periph->periph_xferq);
callout_init(&periph->periph_callout, 0);
cv_init(&periph->periph_cv, "periph");
return periph;
}
示例9: afs_getevent
/* Get and initialize event structure corresponding to lwp event (i.e. address)
* */
/*
 * Get and initialize the event structure corresponding to an lwp event
 * (i.e. an address).  Reuses an unreferenced entry from the hash chain
 * when one exists, otherwise allocates a new one.  The returned entry
 * has its refcount bumped.  Caller must hold the AFS global lock.
 */
static afs_event_t *
afs_getevent(char *event)
{
	afs_event_t *ep, *spare = 0;
	int bucket;

	AFS_ASSERT_GLOCK();
	bucket = afs_evhash(event);

	/* Scan the chain: exact match wins; remember any idle entry. */
	for (ep = afs_evhasht[bucket]; ep; ep = ep->next) {
		if (ep->event == event) {
			ep->refcount++;
			return ep;
		}
		if (ep->refcount == 0)
			spare = ep;
	}

	/* No reusable entry: allocate one and link it into the bucket. */
	if (!spare) {
		spare = osi_AllocSmallSpace(sizeof(afs_event_t));
		afs_evhashcnt++;
		spare->next = afs_evhasht[bucket];
		afs_evhasht[bucket] = spare;
		cv_init(&spare->cond, "event cond var", CV_DEFAULT, NULL);
		spare->seq = 0;
	}

	spare->event = event;
	spare->refcount = 1;
	return spare;
}
示例10: smb_t2_init
/*
 * Initialize a transact2 request: zero the structure, set up its lock
 * and condvar, copy the caller's setup words, and resolve the VC from
 * the source connection object.  Returns 0 or an errno from
 * smb_rq_getenv().
 */
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
    int setupcnt, struct smb_cred *scred)
{
	int n;
	int err;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (n = 0; n < setupcnt; n++)
		t2p->t2_setup[n] = setup[n];

	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;

	/* Only share-level objects carry a share pointer (for up/down). */
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */

	err = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (err)
		return (err);
	return (0);
}
示例11: HgfsInitRequestList
/*
 * Initialize the superinfo's request bookkeeping: the request mutex,
 * the free-request list with its lock and condvar, and the static pool
 * of request structures, all of which start on the free list.
 */
void
HgfsInitRequestList(HgfsSuperInfo *sip) // IN: Pointer to superinfo structure
{
	int idx;

	DEBUG(VM_DEBUG_REQUEST, "HgfsInitRequestList().\n");
	ASSERT(sip);

	mutex_init(&sip->reqMutex, NULL, MUTEX_DRIVER, NULL);

	/* Initialize free request list */
	DblLnkLst_Init(&sip->reqFreeList);
	mutex_init(&sip->reqFreeMutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sip->reqFreeCondVar, NULL, CV_DRIVER, NULL);

	/*
	 * Each request's id is its index into requestPool so replies can
	 * identify their request; state starts UNUSED and every request
	 * is linked onto the free list.
	 */
	for (idx = 0; idx < ARRAYSIZE(requestPool); idx++) {
		requestPool[idx].id = idx;
		requestPool[idx].state = HGFS_REQ_UNUSED;
		DblLnkLst_Init(&requestPool[idx].listNode);
		DblLnkLst_LinkLast(&sip->reqFreeList, &requestPool[idx].listNode);
	}

	//HgfsDebugPrintReqList(&sip->reqFreeList);
	DEBUG(VM_DEBUG_REQUEST, "HgfsInitRequestList() done.\n");
}
示例12: smb_rq_init
/*
 * Initialize an SMB request: zero it, set up its lock/condvar, resolve
 * the VC and share from the connection object, and build the initial
 * request via smb_rq_new().  Returns 0 or an errno.
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
    struct smb_cred *scred)
{
	int err;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	err = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (err)
		return (err);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here. Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed. For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */
	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();

	return (smb_rq_new(rqp, cmd));
}
示例13: rdc_lookup_host
/*
* Look up the supplied hostname in the rdc_link_down chain. Add a new
* entry if it isn't found. Return a pointer to the new or found entry.
*/
static rdc_link_down_t *
rdc_lookup_host(char *host)
{
rdc_link_down_t *p;
mutex_enter(&rdc_ping_lock);
if (rdc_link_down == NULL) {
rdc_link_down = kmem_zalloc(sizeof (*rdc_link_down), KM_SLEEP);
rdc_link_down->next = rdc_link_down;
}
for (p = rdc_link_down->next; p != rdc_link_down; p = p->next) {
if (strcmp(host, p->host) == 0) {
/* Match */
mutex_exit(&rdc_ping_lock);
return (p);
}
}
/* No match, must create a new entry */
p = kmem_zalloc(sizeof (*p), KM_SLEEP);
p->link_down = 1;
p->next = rdc_link_down->next;
rdc_link_down->next = p;
(void) strncpy(p->host, host, MAX_RDC_HOST_SIZE);
mutex_init(&p->syncd_mutex, NULL, MUTEX_DRIVER, NULL);
cv_init(&p->syncd_cv, NULL, CV_DRIVER, NULL);
mutex_exit(&rdc_ping_lock);
return (p);
}
示例14: ksem_alloc
/*
 * ksem object management including creation and reference counting
 * routines.
 *
 * Allocate a new ksem with the given mode and initial value, charging
 * it against the global semaphore limit.  Returns NULL when the limit
 * is reached or the subsystem is shutting down.
 */
static struct ksem *
ksem_alloc(struct ucred *ucred, mode_t mode, unsigned int value)
{
	struct ksem *sem;

	/* Reserve a slot under the global count lock, or bail out. */
	mtx_lock(&ksem_count_lock);
	if (nsems == p31b_getcfg(CTL_P1003_1B_SEM_NSEMS_MAX) || ksem_dead) {
		mtx_unlock(&ksem_count_lock);
		return (NULL);
	}
	nsems++;
	mtx_unlock(&ksem_count_lock);

	sem = malloc(sizeof(*sem), M_KSEM, M_WAITOK | M_ZERO);
	sem->ks_uid = ucred->cr_uid;
	sem->ks_gid = ucred->cr_gid;
	sem->ks_mode = mode;
	sem->ks_value = value;
	cv_init(&sem->ks_cv, "ksem");
	vfs_timestamp(&sem->ks_birthtime);
	sem->ks_atime = sem->ks_mtime = sem->ks_ctime = sem->ks_birthtime;
	refcount_init(&sem->ks_ref, 1);
#ifdef MAC
	mac_posixsem_init(sem);
	mac_posixsem_create(ucred, sem);
#endif

	return (sem);
}
示例15: AcpiOsCreateSemaphore
/*
 * Create an ACPI semaphore backed by a mutex/condvar pair.  Validates
 * the unit counts, allocates the bookkeeping structure, and hands the
 * opaque handle back through OutHandle.
 */
ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_SEMAPHORE *OutHandle)
{
	struct acpi_sema *sem;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	sem = malloc(sizeof(*sem), M_ACPISEM, M_NOWAIT | M_ZERO);
	if (sem == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	/* Name the lock/condvar after the structure's address. */
	snprintf(sem->as_name, sizeof(sem->as_name), "ACPI sema (%p)", sem);
	mtx_init(&sem->as_lock, sem->as_name, NULL, MTX_DEF);
	cv_init(&sem->as_cv, sem->as_name);
	sem->as_maxunits = MaxUnits;
	sem->as_units = InitialUnits;

	*OutHandle = (ACPI_SEMAPHORE)sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
	    sem->as_name, MaxUnits, InitialUnits));

	return_ACPI_STATUS (AE_OK);
}