This article collects typical usage examples of the C/C++ pthread_cond_timedwait function. If you have been wondering what pthread_cond_timedwait does, how it is used in practice, or what real calls to it look like, the hand-picked examples below should help.
Below are 15 code examples of pthread_cond_timedwait, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
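All fifteen examples revolve around the same canonical pattern: build an absolute deadline, then call pthread_cond_timedwait in a loop that re-checks a predicate, treating ETIMEDOUT as a distinct outcome from a normal wakeup. As a baseline for reading them, here is a minimal hedged sketch; the predicate queue_len and the function name wait_for_work are illustrative, not taken from any of the examples.

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queue_len = 0;   /* illustrative predicate: items waiting */

/* Wait up to timeout_sec seconds for queue_len to become non-zero.
 * Returns 0 on success, ETIMEDOUT (or another errno value) on failure. */
int wait_for_work(int timeout_sec)
{
    struct timespec deadline;
    int rc = 0, have_work;

    /* pthread_cond_timedwait takes an ABSOLUTE time, by default
     * measured against CLOCK_REALTIME. */
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += timeout_sec;

    pthread_mutex_lock(&lock);
    /* The wait can wake spuriously, so the predicate must be re-checked
     * in a loop. Because the deadline is absolute, re-waiting does not
     * extend the total timeout. */
    while (queue_len == 0 && rc == 0)
        rc = pthread_cond_timedwait(&cond, &lock, &deadline);
    have_work = (queue_len > 0);
    pthread_mutex_unlock(&lock);

    return have_work ? 0 : rc;
}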
Example 1: err_abort
/*
 * Initial routine of the alarm manager thread.
 */
void *alarma_thread (void *arg)
{
    alarma_t *alarma;
    struct timespec cond_tiempo;
    time_t ahora;
    int estado, expirado;

    /*
     * Lock the mutex at the start -- it will be unlocked during the
     * condition waits, so that the main thread can insert new alarms.
     */
    if ((estado = pthread_mutex_lock(&alarma_mutex)) != 0)
        err_abort(estado, "Lock manager thread\n");
    /*
     * Loop forever, processing requests. The thread goes away
     * when the process terminates.
     */
    while (1) {
        if (fin_solicitado == 1 && alarma_lista == NULL) {
            pthread_exit(NULL);
        }
        /*
         * If the alarm list is empty, wait until a new alarm is
         * inserted. Setting alarma_actual to 0 tells the routine
         * alarma_inserta() that alarma_thread is not busy.
         */
        while (alarma_lista == NULL && fin_solicitado != 1) { /* no alarms and no shutdown requested */
            alarma_actual = 0;
            if ((estado = pthread_cond_wait(&alarma_cond, &alarma_mutex)) != 0) /* so the main thread can run */
                err_abort(estado, "pthread_cond_wait manager thread\n");
        }
        /*
         * Take the first alarm. If it still needs to be served, set up
         * a timed condition wait that ends either
         *   a) because the time expired => report the alarm as fired, or
         *   b) because a signal announced an earlier alarm => reinsert it in the list.
         */
        if (alarma_lista != NULL) {
            alarma = alarma_lista;               /* take the first alarm */
            alarma_lista = alarma_lista->enlace; /* remove it from the list */
            alarma_actual = alarma->tiempo;
            cond_tiempo.tv_sec = alarma_actual;
            cond_tiempo.tv_nsec = 0;
            ahora = time(NULL);
            if (alarma_actual - ahora <= 0) { /* already expired (no need to wait) */
                cond_tiempo.tv_sec = 0;
                expirado = 1;
            }
            else { /* it must be waited on */
                expirado = 0;
                estado = -1;
                while (estado != ETIMEDOUT && estado != 0) /* until expired, or an earlier alarm was inserted at the head */
                    estado = pthread_cond_timedwait(&alarma_cond, &alarma_mutex, &cond_tiempo); /* so the main thread can run */
                switch (estado) {
                case 0:                     /* an earlier alarm was inserted */
                    alarma_inserta(alarma); /* reinsert this one */
                    break;
                case ETIMEDOUT:             /* the wait ran to completion */
                    expirado = 1;
                    break;
                default:
                    err_abort(estado, "pthread_cond_timedwait manager thread\n");
                    break;
                }
            }
            if (expirado == 1) {
                /* NOTE: emit, as debug output, the values of the alarm
                 * being served -- before free(), so the alarm is never
                 * read after being released. */
#ifdef DEBUG
                fprintf(stderr, "%d(%d)[\"%s\"]\n", (int)alarma->tiempo,
                        (int)(alarma->tiempo - time(NULL)), alarma->mensaje);
#endif
                printf("(%d) %s\n", alarma->segundos, alarma->mensaje);
                free(alarma);
            }
        }
    }
}
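One design point worth calling out: because pthread_cond_timedwait takes an absolute expiration time rather than a relative interval, the retry loop around it is safe. Whether the wait returns early because a new alarm was signalled or because of a spurious wakeup, re-entering the wait with the same cond_tiempo preserves the original deadline, so the total wait never stretches.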
Example 2: amazon_gahp_grab_big_mutex
static void *worker_function( void *ptr )
{
    Worker *worker = (Worker *)ptr;

    /* Our new thread should grab the big mutex before starting.
     * We will then release it and let other threads run when we
     * would otherwise block.
     */
    amazon_gahp_grab_big_mutex();

    if( !worker ) {
        dprintf (D_ALWAYS, "Ooops!! No input Data in worker thread\n");
        amazon_gahp_release_big_mutex();
        return NULL;
    }

    // Pop Request
    Request *new_request = NULL;
    struct timespec ts;
    struct timeval tp;

    while(1) {
        // pthread_mutex_lock(&worker->m_mutex);
        worker->m_is_doing = false;
        worker->m_is_waiting = false;

        if( worker->m_can_use == false ) {
            // Need to die
            // pthread_mutex_unlock(&worker->m_mutex);
            worker_exit(worker, true);
        }

        while( (new_request = popRequest(worker)) == NULL ) {
            worker->m_is_waiting = true;

            // Get the current time
            gettimeofday(&tp, NULL);

            /* Convert from timeval to timespec */
            ts.tv_sec = tp.tv_sec;
            ts.tv_nsec = tp.tv_usec * 1000;
            ts.tv_sec += WORKER_MANAGER_TIMER_INTERVAL;

            if( ioprocess ) {
                if( ioprocess->numOfPendingRequest() > 0 ) {
                    continue;
                }
            }

            //dprintf(D_FULLDEBUG, "Thread(%d) is calling cond_wait\n",
            //        worker->m_id);

            // The pthread_cond_timedwait will block until signalled
            // with more work from our main thread, so we MUST release
            // the big fat mutex here or we will deadlock.
            // amazon_gahp_release_big_mutex();
            int retval = pthread_cond_timedwait(&worker->m_cond,
                                                &global_big_mutex, &ts);
            // amazon_gahp_grab_big_mutex();

            if( worker->m_can_use == false ) {
                // Need to die
                worker->m_is_waiting = false;
                // pthread_mutex_unlock(&worker->m_mutex);
                worker_exit(worker, true);
            }else {
                // If a timeout happens, need to check m_must_be_alive
                if( retval == ETIMEDOUT ) {
                    //dprintf(D_FULLDEBUG, "Thread(%d) Wait timed out !\n",
                    //        worker->m_id);
                    if( ioprocess ) {
                        if( ioprocess->numOfPendingRequest() > 0 ) {
                            continue;
                        }
                    }
                    if( !worker->m_must_be_alive ) {
                        // Need to die according to the min number of workers
                        worker->m_is_waiting = false;
                        worker->m_can_use = false;
                        // pthread_mutex_unlock(&worker->m_mutex);
                        worker_exit(worker, false);
                    }else {
                        dprintf(D_FULLDEBUG, "Thread(%d) must be alive for "
                                "another request\n", worker->m_id);
                    }
                }
            }
        }

        worker->m_is_doing = true;
        worker->m_is_waiting = false;
//......... the rest of this example is omitted .........
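Example 2 converts a gettimeofday() result into the absolute timespec that pthread_cond_timedwait expects, and only adds whole seconds. When the interval is not a whole number of seconds, the nanosecond field can overflow and must be normalized; a small hedged helper showing the general form (the name make_deadline_ms is illustrative):

#include <sys/time.h>
#include <time.h>

/* Build an absolute deadline `ms` milliseconds from now, starting from
 * gettimeofday() as Example 2 does; normalizes tv_nsec into [0, 1e9). */
static void make_deadline_ms(struct timespec *ts, long ms)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    ts->tv_sec  = tv.tv_sec + ms / 1000;
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000L;
    if (ts->tv_nsec >= 1000000000L) {   /* carry into the seconds field */
        ts->tv_sec  += 1;
        ts->tv_nsec -= 1000000000L;
    }
}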
Example 3: while
int DBSpool::ExecuteSQL(const string& strSQL, MYSQL_RES** res, short& shIdentifier, int& iRelt, unsigned long long* iInsertId)
{
    if (m_pDBConnection == NULL){
        return SQL_TYPE_UNKNOW;
    }

    int i = -1, iState = 0;
    timespec tTime;
    timeval tNow;

    while (i == -1){
        gettimeofday(&tNow, NULL);
        tTime.tv_sec = tNow.tv_sec + 1;
        tTime.tv_nsec = 0;

        //lock idle list
        Lock(&m_thExecMutex);
        if (m_IdleList.size() < 1){
            Unlock(&m_thExecMutex);

            //lock condition
            Lock(&m_thIdleMutex);
            iState = pthread_cond_timedwait(&m_thCond, &m_thIdleMutex, &tTime);
            Unlock(&m_thIdleMutex);

            if (iState == 0){
                //lock idle list
                Lock(&m_thExecMutex);
                if (m_IdleList.size() > 0){
                    i = m_IdleList.front();
                    m_IdleList.pop_front();
                    Unlock(&m_thExecMutex);
                    break;
                }else{
                    Unlock(&m_thExecMutex);
                    continue;
                }
            }else if(iState == ETIMEDOUT){
                return SQL_TYPE_UNKNOW;
            }
        }else{
            i = m_IdleList.front();
            m_IdleList.pop_front();
            Unlock(&m_thExecMutex);
        }
    }

    if (i >= 0){
        m_pDBConnection[i].Lock();
        int iRet = m_pDBConnection[i].ExecuteSQL(strSQL, res, iRelt, iInsertId);
        int iSize = 0;

        switch (iRet){
        case SQL_TYPE_SELECT:
            shIdentifier = i + 1;
            break;
        case SQL_TYPE_INSERT:
        case SQL_TYPE_UPDATE:
        case SQL_TYPE_DELETE:
        default:
            shIdentifier = 0;
            m_pDBConnection[i].Unlock();
            //lock idle list
            Lock(&m_thExecMutex);
            m_IdleList.push_back(i);
            iSize = m_IdleList.size();
            Unlock(&m_thExecMutex);
            //broadcast condition
            if (iSize == 1){
                Lock(&m_thIdleMutex);
                pthread_cond_broadcast(&m_thCond);
                Unlock(&m_thIdleMutex);
            }
        }
        return iRet;
    }
    return SQL_TYPE_UNKNOW;
}
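Example 3 guards the idle list with m_thExecMutex but waits on m_thCond under a different mutex, m_thIdleMutex. That split is why the outer while loop must re-check the list after every wakeup: a connection released between Unlock(&m_thExecMutex) and the timed wait can be signalled before this thread is actually waiting, and the one-second timeout bounds how long such a lost wakeup can stall the caller. The conventional alternative is to protect the predicate and the condition variable with one mutex; a hedged sketch with illustrative names:

#include <errno.h>
#include <pthread.h>
#include <time.h>

#define POOL_SIZE 16

static pthread_mutex_t idle_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle_cond  = PTHREAD_COND_INITIALIZER;
static int idle_slots[POOL_SIZE];   /* stack of free connection indexes */
static int idle_count = 0;

/* Pop an idle index, waiting until `deadline`; returns -1 on timeout.
 * One mutex covers both the list and the condition variable, so a
 * release can never be signalled between our check and our wait. */
static int pop_idle(const struct timespec *deadline)
{
    int i = -1, rc = 0;

    pthread_mutex_lock(&idle_mutex);
    while (idle_count == 0 && rc == 0)
        rc = pthread_cond_timedwait(&idle_cond, &idle_mutex, deadline);
    if (idle_count > 0)
        i = idle_slots[--idle_count];
    pthread_mutex_unlock(&idle_mutex);
    return i;
}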
Example 4: while
/*
 * Run as a pthread to keep saving slurmctld state information as needed.
 * Use schedule_job_save(), schedule_node_save(), and schedule_part_save()
 * to queue a state save of each data structure.
 * no_data IN - unused
 * RET - NULL
 */
extern void *slurmctld_state_save(void *no_data)
{
	time_t last_save = 0, now;
	double save_delay;
	bool run_save;
	int save_count;

	while (1) {
		/* wait for work to perform */
		slurm_mutex_lock(&state_save_lock);
		while (1) {
			save_count = save_jobs + save_nodes + save_parts +
				     save_front_end + save_resv +
				     save_triggers;
			now = time(NULL);
			save_delay = difftime(now, last_save);
			if (save_count &&
			    (!run_save_thread ||
			     (save_delay >= SAVE_MAX_WAIT))) {
				last_save = now;
				break;		/* do the work */
			} else if (!run_save_thread) {
				run_save_thread = true;
				slurm_mutex_unlock(&state_save_lock);
				return NULL;	/* shutdown */
			} else if (save_count) {	/* wait for a timeout */
				struct timespec ts = {0, 0};
				ts.tv_sec = now + 1;
				pthread_cond_timedwait(&state_save_cond,
						       &state_save_lock, &ts);
			} else {		/* wait for more work */
				pthread_cond_wait(&state_save_cond,
						  &state_save_lock);
			}
		}

		/* save front_end node info if necessary */
		run_save = false;
		/* slurm_mutex_lock(&state_save_lock); done above */
		if (save_front_end) {
			run_save = true;
			save_front_end = 0;
		}
		slurm_mutex_unlock(&state_save_lock);
		if (run_save)
			(void)dump_all_front_end_state();

		/* save job info if necessary */
		run_save = false;
		slurm_mutex_lock(&state_save_lock);
		if (save_jobs) {
			run_save = true;
			save_jobs = 0;
		}
		slurm_mutex_unlock(&state_save_lock);
		if (run_save)
			(void)dump_all_job_state();

		/* save node info if necessary */
		run_save = false;
		slurm_mutex_lock(&state_save_lock);
		if (save_nodes) {
			run_save = true;
			save_nodes = 0;
		}
		slurm_mutex_unlock(&state_save_lock);
		if (run_save)
			(void)dump_all_node_state();

		/* save partition info if necessary */
		run_save = false;
		slurm_mutex_lock(&state_save_lock);
		if (save_parts) {
			run_save = true;
			save_parts = 0;
		}
		slurm_mutex_unlock(&state_save_lock);
		if (run_save)
			(void)dump_all_part_state();

		/* save reservation info if necessary */
		run_save = false;
		slurm_mutex_lock(&state_save_lock);
		if (save_resv) {
			run_save = true;
			save_resv = 0;
		}
		slurm_mutex_unlock(&state_save_lock);
		if (run_save)
			(void)dump_all_resv_state();

		/* save trigger info if necessary */
		run_save = false;
//......... the rest of this example is omitted .........
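The schedule_job_save() family referenced in the header comment is not shown. Given the protocol visible above -- set a dirty flag under state_save_lock, then wake the thread -- such a trigger plausibly reduces to the sketch below; this is inferred from the loop, not copied from Slurm.

/* Hypothetical reconstruction of a trigger, inferred from the loop above:
 * mark the job state dirty and wake the state-save thread. */
extern pthread_mutex_t state_save_lock;
extern pthread_cond_t state_save_cond;
extern int save_jobs;

void schedule_job_save(void)
{
	slurm_mutex_lock(&state_save_lock);
	save_jobs++;
	pthread_cond_signal(&state_save_cond);
	slurm_mutex_unlock(&state_save_lock);
}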
Example 5: status_thread
/*
 * status_thread
 *   DESCRIPTION: Function executed by status message helper thread.
 *                Waits for a message to be displayed, then shows the
 *                message for 1.5 seconds before deleting it. If a
 *                new message has appeared in the meantime, restarts the
 *                clock and tries again.
 *   INPUTS: none (ignored)
 *   OUTPUTS: none
 *   RETURN VALUE: NULL
 *   SIDE EFFECTS: Changes the status message to an empty string.
 */
static void*
status_thread (void* ignore)
{
    struct timespec ts; /* absolute wake-up time */

    while (1) {
        /*
         * Wait for a message to appear. Note that we must check the
         * condition after acquiring the lock, and that pthread_cond_wait
         * yields the lock, then reacquires the lock before returning.
         */
        (void)pthread_mutex_lock (&msg_lock);
        while ('\0' == status_msg[0]) {
            print_status_text(room_name(game_info.where), STATUS_ROOM_COLOR,
                              STATUS_BG_COLOR, ALIGN_LEFT, 1);
            print_status_text(get_typed_command(), STATUS_COMMAND_COLOR,
                              STATUS_BG_COLOR, ALIGN_RIGHT, 0);
            pthread_cond_wait (&msg_cv, &msg_lock);
        }

        /*
         * A message is present: if we stop before the timeout
         * passes, assume that a new one has been posted; if the
         * timeout passes, clear the message and wait for a new one.
         */
        do {
            /* Get the current time. */
            clock_gettime (CLOCK_REALTIME, &ts);

            const char *command = get_typed_command();
            int alignment = ALIGN_CENTER;
            if (command[0] != '\0') {
                alignment = ALIGN_LEFT;
            }
            print_status_text(status_msg, STATUS_FG_COLOR, STATUS_BG_COLOR,
                              alignment, 1);
            print_status_text(command, STATUS_COMMAND_COLOR, STATUS_BG_COLOR,
                              ALIGN_RIGHT, 0);

            /* Add 1.5 seconds to it. */
            if (500000000 <= ts.tv_nsec) {
                ts.tv_sec += 2;
                ts.tv_nsec -= 500000000;
            } else {
                ts.tv_sec += 1;
                ts.tv_nsec += 500000000;
            }

            /*
             * And go to sleep. If we wake up due to anything but a
             * timeout, we assume (possibly incorrectly) that a new
             * message has appeared and try to wait 1.5 seconds again.
             */
        } while (ETIMEDOUT !=
                 pthread_cond_timedwait (&msg_cv, &msg_lock, &ts));

        /*
         * Clear the message, then release the lock (remember that
         * pthread_cond_timedwait reacquires the lock before returning).
         */
        status_msg[0] = '\0';
        (void)pthread_mutex_unlock (&msg_lock);
    }

    /* This code never executes--the thread should always be cancelled. */
    return NULL;
}
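Two details make this loop robust. First, the deadline is recomputed from clock_gettime() at the top of each do/while iteration, so every wakeup that is not a timeout restarts the full 1.5-second display window -- exactly the "new message restarts the clock" behavior the header describes. Second, the half-second addition carries into tv_sec whenever tv_nsec would exceed 999,999,999, keeping the timespec normalized as pthread_cond_timedwait requires (an out-of-range tv_nsec yields EINVAL).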
Example 6: CHKNULL
static void *csHeartbeat(void *parm)
{
	CsState *csState = (CsState *) parm;
	pthread_mutex_t mutex;
	pthread_cond_t cv;
	int cycleCount = 6;
	int i;
	Venture *venture;
	int j;
	Unit *unit;
	Cell *cell;
	int result;
	struct timeval workTime;
	struct timespec deadline;

	CHKNULL(csState);
	if (pthread_mutex_init(&mutex, NULL))
	{
		putSysErrmsg("Can't start heartbeat, mutex init failed", NULL);
		return NULL;
	}

	if (pthread_cond_init(&cv, NULL))
	{
		pthread_mutex_destroy(&mutex);
		putSysErrmsg("Can't start heartbeat, cond init failed", NULL);
		return NULL;
	}

#ifndef mingw
	sigset_t signals;
	sigfillset(&signals);
	pthread_sigmask(SIG_BLOCK, &signals, NULL);
#endif

	while (1)
	{
		lockMib();
		if (cycleCount > 5)	/* Every N5_INTERVAL sec. */
		{
			cycleCount = 0;
			stopOtherConfigServers(csState);
		}

		for (i = 1; i <= MAX_VENTURE_NBR; i++)
		{
			venture = (_mib(NULL))->ventures[i];
			if (venture == NULL) continue;
			for (j = 0; j <= MAX_UNIT_NBR; j++)
			{
				unit = venture->units[j];
				if (unit == NULL
				|| (cell = unit->cell)->mamsEndpoint.ept
						== NULL)
				{
					continue;
				}

				if (cell->heartbeatsMissed == 3)
				{
					clearMamsEndpoint
						(&(cell->mamsEndpoint));
				}
				else if (cell->heartbeatsMissed < 3)
				{
					if (sendMamsMsg (&cell->mamsEndpoint,
							&csState->tsif,
							heartbeat,
							0, 0, NULL) < 0)
					{
						putErrmsg("Can't send heartbeat.",
								NULL);
					}
				}

				cell->heartbeatsMissed++;
			}
		}

		/* Now sleep for N3_INTERVAL seconds. */
		unlockMib();
		getCurrentTime(&workTime);
		deadline.tv_sec = workTime.tv_sec + N3_INTERVAL;
		deadline.tv_nsec = workTime.tv_usec * 1000;
		pthread_mutex_lock(&mutex);
		result = pthread_cond_timedwait(&cv, &mutex, &deadline);
		pthread_mutex_unlock(&mutex);
		if (result)
		{
			errno = result;
			if (errno != ETIMEDOUT)
			{
				putSysErrmsg("Heartbeat failure", NULL);
				break;
			}
		}

		cycleCount++;
	}
Example 7: ui_wait_key_with_repeat
int ui_wait_key_with_repeat()
{
    int key = -1;

    // Loop to wait for more keys.
    do {
        int timeouts = UI_WAIT_KEY_TIMEOUT_SEC;
        int rc = 0;
        struct timeval now;
        struct timespec timeout;
        pthread_mutex_lock(&key_queue_mutex);
        while (key_queue_len == 0 && timeouts > 0) {
            gettimeofday(&now, NULL);
            timeout.tv_sec = now.tv_sec;
            timeout.tv_nsec = now.tv_usec * 1000;
            timeout.tv_sec += REFRESH_TIME_USB_INTERVAL;

            rc = 0;
            while (key_queue_len == 0 && rc != ETIMEDOUT) {
                rc = pthread_cond_timedwait(&key_queue_cond, &key_queue_mutex,
                                            &timeout);
                if (volumes_changed()) {
                    pthread_mutex_unlock(&key_queue_mutex);
                    return REFRESH;
                }
            }
            timeouts -= REFRESH_TIME_USB_INTERVAL;
        }
        pthread_mutex_unlock(&key_queue_mutex);

        if (rc == ETIMEDOUT && !usb_connected()) {
            return -1;
        }

        // Loop to wait for more keys, or for repeated keys to be ready.
        while (1) {
            unsigned long now_msec;
            gettimeofday(&now, NULL);
            now_msec = (now.tv_sec * 1000) + (now.tv_usec / 1000);
            pthread_mutex_lock(&key_queue_mutex);

            // Replacement for the while conditional, so we don't have to lock the entire
            // loop, because that prevents the input system from touching the variables while
            // the loop is running which causes problems.
            if (key_queue_len == 0) {
                pthread_mutex_unlock(&key_queue_mutex);
                break;
            }

            key = key_queue[0];
            memcpy(&key_queue[0], &key_queue[1], sizeof(int) * --key_queue_len);

            // sanity check the returned key.
            if (key < 0) {
                pthread_mutex_unlock(&key_queue_mutex);
                return key;
            }

            // Check for already released keys and drop them if they've repeated.
            if (!key_pressed[key] && key_last_repeat[key] > 0) {
                pthread_mutex_unlock(&key_queue_mutex);
                continue;
            }

            if (key_can_repeat(key)) {
                // Re-add the key if a repeat is expected, since we just popped it. The
                // if below will determine when the key is actually repeated (returned);
                // in the mean time, the key will be passed through the queue over and
                // over and re-evaluated each time.
                if (key_pressed[key]) {
                    key_queue[key_queue_len] = key;
                    key_queue_len++;
                }
                if ((now_msec > key_press_time[key] + UI_KEY_WAIT_REPEAT &&
                     now_msec > key_last_repeat[key] + UI_KEY_REPEAT_INTERVAL) ||
                    key_last_repeat[key] == 0) {
                    key_last_repeat[key] = now_msec;
                } else {
                    // Not ready
                    pthread_mutex_unlock(&key_queue_mutex);
                    continue;
                }
            }
            pthread_mutex_unlock(&key_queue_mutex);
            return key;
        }
    } while (1);
    return key;
}
Example 8: runtime_semasleep
int32
runtime_semasleep (int64 ns)
{
  M *m;
  struct go_sem *sem;
  int r;

  m = runtime_m ();
  sem = (struct go_sem *) m->waitsema;
  if (ns >= 0)
    {
      int64 abs;
      struct timespec ts;
      int err;

      abs = ns + runtime_nanotime ();
      ts.tv_sec = abs / 1000000000LL;
      ts.tv_nsec = abs % 1000000000LL;

      err = 0;

#ifdef HAVE_SEM_TIMEDWAIT
      r = sem_timedwait (&sem->sem, &ts);
      if (r != 0)
        err = errno;
#else
      if (pthread_mutex_lock (&sem->mutex) != 0)
        runtime_throw ("pthread_mutex_lock");

      while ((r = sem_trywait (&sem->sem)) != 0)
        {
          r = pthread_cond_timedwait (&sem->cond, &sem->mutex, &ts);
          if (r != 0)
            {
              err = r;
              break;
            }
        }

      if (pthread_mutex_unlock (&sem->mutex) != 0)
        runtime_throw ("pthread_mutex_unlock");
#endif

      if (err != 0)
        {
          if (err == ETIMEDOUT || err == EAGAIN || err == EINTR)
            return -1;
          runtime_throw ("sema_timedwait");
        }
      return 0;
    }

  while (sem_wait (&sem->sem) != 0)
    {
      if (errno == EINTR)
        continue;
      runtime_throw ("sem_wait");
    }

  return 0;
}
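Note how, in the fallback path, sem_trywait() serves as the condition predicate: pthread_cond_timedwait() can return 0 without the semaphore actually being available (a spurious wakeup, or another waiter consuming the post first), so the loop re-tries the semaphore after every wakeup and only gives up when the wait itself fails, e.g. with ETIMEDOUT.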
Example 9: pthread_rwlock_timedwrlock
int
pthread_rwlock_timedwrlock (pthread_rwlock_t * rwlock,
                            const struct timespec *abstime)
{
  int result;
  pthread_rwlock_t rwl;

  if (rwlock == NULL || *rwlock == NULL)
    {
      return EINVAL;
    }

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static rwlock. We check
   * again inside the guarded section of ptw32_rwlock_check_need_init()
   * to avoid race conditions.
   */
  if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
      result = ptw32_rwlock_check_need_init (rwlock);

      if (result != 0 && result != EBUSY)
        {
          return result;
        }
    }

  rwl = *rwlock;

  if (rwl->nMagic != PTW32_RWLOCK_MAGIC)
    {
      return EINVAL;
    }

  if ((result =
       pthread_mutex_timedlock (&(rwl->mtxExclusiveAccess), abstime)) != 0)
    {
      return result;
    }

  if ((result =
       pthread_mutex_timedlock (&(rwl->mtxSharedAccessCompleted),
                                abstime)) != 0)
    {
      (void) pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
      return result;
    }

  if (rwl->nExclusiveAccessCount == 0)
    {
      if (rwl->nCompletedSharedAccessCount > 0)
        {
          rwl->nSharedAccessCount -= rwl->nCompletedSharedAccessCount;
          rwl->nCompletedSharedAccessCount = 0;
        }

      if (rwl->nSharedAccessCount > 0)
        {
          rwl->nCompletedSharedAccessCount = -rwl->nSharedAccessCount;

          /*
           * This routine may be a cancelation point
           * according to POSIX 1003.1j section 18.1.2.
           */
#if defined(_MSC_VER) && _MSC_VER < 800
#pragma inline_depth(0)
#endif
          pthread_cleanup_push (ptw32_rwlock_cancelwrwait, (void *) rwl);

          do
            {
              result =
                pthread_cond_timedwait (&(rwl->cndSharedAccessCompleted),
                                        &(rwl->mtxSharedAccessCompleted),
                                        abstime);
            }
          while (result == 0 && rwl->nCompletedSharedAccessCount < 0);

          pthread_cleanup_pop ((result != 0) ? 1 : 0);
#if defined(_MSC_VER) && _MSC_VER < 800
#pragma inline_depth()
#endif

          if (result == 0)
            {
              rwl->nSharedAccessCount = 0;
            }
        }
    }

  if (result == 0)
    {
      rwl->nExclusiveAccessCount++;
    }

  return result;
}
Example 10: __ctBackgroundThreadWriter
void* __ctBackgroundThreadWriter(void* d)
{
    FILE* serialFile;
    char* fname = getenv("CONTECH_FE_FILE");
    unsigned int wpos = 0;
    unsigned int maxBuffersAlloc = 0, memLimitBufCount = 0;
    size_t totalWritten = 0;
    pct_serial_buffer memLimitQueue = NULL;
    pct_serial_buffer memLimitQueueTail = NULL;
    unsigned long long totalLimitTime = 0, startLimitTime, endLimitTime;
    int mpiRank = __ctGetMPIRank();
    int mpiPresent = __ctIsMPIPresent();

    // TODO: Create MPI event
    // TODO: Modify filename with MPI rank
    // TODO: Only do the above when MPI is present
    if (fname == NULL)
    {
        if (mpiPresent != 0)
        {
            /* Padded so the rank digits written at offset 16 fit. */
            char* fnameMPI = strdup("/tmp/contech_fe      ");
            fnameMPI[15] = '.';
            snprintf(fnameMPI + 16, 5, "%d", mpiRank);
            serialFile = fopen(fnameMPI, "wb");
            free(fnameMPI);
        }
        else
        {
            serialFile = fopen("/tmp/contech_fe", "wb");
        }
    }
    else
    {
        serialFile = fopen(fname, "wb");
    }

    if (serialFile == NULL)
    {
        fprintf(stderr, "Failure to open front-end stream for writing.\n");
        if (fname == NULL) { fprintf(stderr, "\tCONTECH_FE_FILE unspecified\n");}
        else {fprintf(stderr, "\tAttempted on %s\n", fname);}
        exit(-1);
    }

    {
        unsigned int id = 0;
        ct_event_id ty = ct_event_version;
        unsigned int version = CONTECH_EVENT_VERSION;
        uint8_t* bb_info = _binary_contech_bin_start;

        fwrite(&id, sizeof(unsigned int), 1, serialFile);
        fwrite(&ty, sizeof(unsigned int), 1, serialFile);
        fwrite(&version, sizeof(unsigned int), 1, serialFile);
        fwrite(bb_info, sizeof(unsigned int), 1, serialFile);
        totalWritten += 4 * sizeof(unsigned int);

        {
            size_t tl, wl;
            unsigned int buf[2];
            buf[0] = ct_event_rank;
            buf[1] = mpiRank;

            tl = 0;
            do
            {
                /* buf + tl, not &buf + tl: advance by elements, not by
                 * whole arrays. */
                wl = fwrite(buf + tl, sizeof(unsigned int), 2 - tl, serialFile);
                //if (wl > 0)
                // wl is 0 on error, so it is safe to still add
                tl += wl;
            } while (tl < 2);
            totalWritten += 2 * sizeof(unsigned int);
        }

        bb_info += 4; // skip the basic block count
        while (bb_info != _binary_contech_bin_end)
        {
            // id, len, memop_0, ... memop_len-1
            // The Contech pass lays out the events in the appropriate format
            size_t tl = fwrite(bb_info, sizeof(char), _binary_contech_bin_end - bb_info, serialFile);
            bb_info += tl;
            totalWritten += tl;
        }
    }

    // Main loop
    //   Write queued buffers to disk until the program terminates
    pthread_mutex_lock(&__ctQueueBufferLock);
    do {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 30;

        int condRetVal = 0;

        // Check for a queued buffer, i.e. is the program generating events
        while (__ctQueuedBuffers == NULL && condRetVal == 0)
        {
            condRetVal = pthread_cond_timedwait(&__ctQueueSignal, &__ctQueueBufferLock, &ts);
        }

        if (condRetVal == 0)
//......... the rest of this example is omitted .........
Example 11: pthread_mutex_lock
// timeout in milliseconds
OMX_ERRORTYPE COMXCoreComponent::WaitForEvent(OMX_EVENTTYPE eventType, long timeout)
{
#ifdef OMX_DEBUG_EVENTS
  CLog::Log(LOGDEBUG, "COMXCoreComponent::WaitForEvent %s wait event 0x%08x\n",
            m_componentName.c_str(), (int)eventType);
#endif

  pthread_mutex_lock(&m_omx_event_mutex);

  struct timespec endtime;
  clock_gettime(CLOCK_REALTIME, &endtime);
  add_timespecs(endtime, timeout);

  while(true)
  {
    for (std::vector<omx_event>::iterator it = m_omx_events.begin(); it != m_omx_events.end(); it++)
    {
      omx_event event = *it;

#ifdef OMX_DEBUG_EVENTS
      CLog::Log(LOGDEBUG, "COMXCoreComponent::WaitForEvent %s inlist event event.eEvent 0x%08x event.nData1 0x%08x event.nData2 %d\n",
                m_componentName.c_str(), (int)event.eEvent, (int)event.nData1, (int)event.nData2);
#endif

      if(event.eEvent == OMX_EventError && event.nData1 == (OMX_U32)OMX_ErrorSameState && event.nData2 == 1)
      {
#ifdef OMX_DEBUG_EVENTS
        CLog::Log(LOGDEBUG, "COMXCoreComponent::WaitForEvent %s remove event event.eEvent 0x%08x event.nData1 0x%08x event.nData2 %d\n",
                  m_componentName.c_str(), (int)event.eEvent, (int)event.nData1, (int)event.nData2);
#endif
        m_omx_events.erase(it);
        pthread_mutex_unlock(&m_omx_event_mutex);
        return OMX_ErrorNone;
      }
      else if(event.eEvent == OMX_EventError)
      {
        m_omx_events.erase(it);
        pthread_mutex_unlock(&m_omx_event_mutex);
        return (OMX_ERRORTYPE)event.nData1;
      }
      else if(event.eEvent == eventType)
      {
#ifdef OMX_DEBUG_EVENTS
        CLog::Log(LOGDEBUG, "COMXCoreComponent::WaitForEvent %s remove event event.eEvent 0x%08x event.nData1 0x%08x event.nData2 %d\n",
                  m_componentName.c_str(), (int)event.eEvent, (int)event.nData1, (int)event.nData2);
#endif
        m_omx_events.erase(it);
        pthread_mutex_unlock(&m_omx_event_mutex);
        return OMX_ErrorNone;
      }
    }

    int retcode = pthread_cond_timedwait(&m_omx_event_cond, &m_omx_event_mutex, &endtime);
    if (retcode != 0)
    {
      if (timeout > 0)
        CLog::Log(LOGERROR, "COMXCoreComponent::WaitForEvent %s wait event 0x%08x timeout %ld\n",
                  m_componentName.c_str(), (int)eventType, timeout);
      pthread_mutex_unlock(&m_omx_event_mutex);
      return OMX_ErrorMax;
    }
  }

  pthread_mutex_unlock(&m_omx_event_mutex);
  return OMX_ErrorNone;
}
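WaitForEvent relies on add_timespecs() to turn the relative millisecond timeout into the absolute deadline pthread_cond_timedwait needs; that helper is defined elsewhere in the source file. A plausible reconstruction, inferred from the clock_gettime(CLOCK_REALTIME, ...) call just before it rather than copied from the XBMC/Kodi tree:

// Hypothetical reconstruction: advance an absolute timespec by the given
// number of milliseconds, carrying any tv_nsec overflow into tv_sec.
static void add_timespecs(struct timespec &time, long millisecs)
{
  time.tv_sec  += millisecs / 1000;
  time.tv_nsec += (millisecs % 1000) * 1000000;
  if (time.tv_nsec >= 1000000000)
  {
    time.tv_sec  += 1;
    time.tv_nsec -= 1000000000;
  }
}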
Example 12: Java_org_videolan_libvlc_LibVLC_getThumbnail
//......... the beginning of this example is omitted .........
    }

    /* Compute the size parameters of the frame to generate. */
    unsigned thumbWidth = frameWidth;
    unsigned thumbHeight = frameHeight;

    const float inputAR = (float)videoWidth / videoHeight;
    const float screenAR = (float)frameWidth / frameHeight;

    /* In most cases, the video is wider than it is tall */
    if (screenAR < inputAR)
    {
        thumbHeight = (float)frameWidth / inputAR + 1;
        sys->blackBorders = ( (frameHeight - thumbHeight) / 2 ) * frameWidth;
    }
    else
    {
        LOGD("Weird aspect ratio.\n");
        thumbWidth = (float)frameHeight * inputAR;
        sys->blackBorders = (frameWidth - thumbWidth) / 2;
    }

    sys->thumbPitch = thumbWidth * PIXEL_SIZE;
    sys->thumbHeight = thumbHeight;
    sys->frameWidth = frameWidth;

    /* Allocate the memory to store the frames. */
    size_t thumbSize = sys->thumbPitch * (sys->thumbHeight+1);
    sys->thumbData = malloc(thumbSize);
    if (sys->thumbData == NULL)
    {
        LOGE("Could not allocate the memory to store the frame!");
        goto end;
    }

    /* Allocate the memory to store the thumbnail. */
    unsigned frameSize = frameWidth * frameHeight * PIXEL_SIZE;
    sys->frameData = calloc(frameSize, 1);
    if (sys->frameData == NULL)
    {
        LOGE("Could not allocate the memory to store the thumbnail!");
        goto end;
    }

    /* Set the video format and the callbacks. */
    libvlc_video_set_format(mp, "RGBA", thumbWidth, thumbHeight, sys->thumbPitch);
    libvlc_video_set_callbacks(mp, thumbnailer_lock, thumbnailer_unlock,
                               NULL, (void*)sys);
    sys->state = THUMB_SEEKING;

    /* Play the media. */
    libvlc_media_player_play(mp);
    libvlc_media_player_set_position(mp, THUMBNAIL_POSITION);

    const int wait_time = 50000;
    const int max_attempts = 100;
    for (int i = 0; i < max_attempts; ++i) {
        if (libvlc_media_player_is_playing(mp) && libvlc_media_player_get_position(mp) >= THUMBNAIL_POSITION)
            break;
        usleep(wait_time);
    }

    /* Wait for the thumbnail to be generated. */
    pthread_mutex_lock(&sys->doneMutex);
    sys->state = THUMB_SEEKED;
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 10; /* number of seconds before we abort the thumbnailer */
    do {
        int ret = pthread_cond_timedwait(&sys->doneCondVar, &sys->doneMutex, &deadline);
        if (ret == ETIMEDOUT)
            break;
    } while (sys->state != THUMB_DONE);
    pthread_mutex_unlock(&sys->doneMutex);

    /* Stop and release the media player. */
    libvlc_media_player_stop(mp);
    libvlc_media_player_release(mp);

    if (sys->state == THUMB_DONE) {
        /* Create the Java byte array in which to return the created thumbnail. */
        byteArray = (*env)->NewByteArray(env, frameSize);
        if (byteArray == NULL)
        {
            LOGE("Could not allocate the Java byte array to store the frame!");
            goto end;
        }

        (*env)->SetByteArrayRegion(env, byteArray, 0, frameSize,
                                   (jbyte *)sys->frameData);
    }

end:
    pthread_mutex_destroy(&sys->doneMutex);
    pthread_cond_destroy(&sys->doneCondVar);
    free(sys->frameData);
    free(sys->thumbData);
    free(sys);
enomem:
    return byteArray;
}
Example 13: alcnd_timedwait
int alcnd_timedwait(alcnd_t *cond, almtx_t *mtx, const struct timespec *time_point)
{
    if(pthread_cond_timedwait(cond, mtx, time_point) == 0)
        return althrd_success;
    return althrd_error;
}
Example 14: pthread_mutex_lock
//......... the beginning of this example is omitted .........
                        y = (int)(progress.background_w-progress.inactive_end_w);
                    } else {
                        y = (int)target_width;
                    }
                } else {
                    if(target_width >= (progress.background_w-progress.inactive_end_w)) {
                        y = (int)(progress.background_w-progress.inactive_end_w);
                    } else {
                        y = (int)target_width;
                    }
                }

                if(x < y) {
                    while(x < y) {
                        if(nodaemon == 0 && black == 0) {
                            if(progress_active == 0) {
                                if(strcmp(progress.active_fill_e, "jpg") == 0) {
                                    draw_jpg(progress.background_x+x, progress.background_y, progress.background_z, progress.active_fill_img);
                                } else if(strcmp(progress.active_fill_e, "png") == 0) {
                                    draw_png(progress.background_x+x, progress.background_y, progress.background_z, progress.active_fill_img);
                                }
                                if(percentage == -1) {
                                    usleep((__useconds_t)(speed*progress.active_fill_w));
                                }
                            } else {
                                if(strcmp(progress.inactive_fill_e, "jpg") == 0) {
                                    draw_jpg(progress.background_x+x, progress.background_y, progress.background_z, progress.inactive_fill_img);
                                } else if(strcmp(progress.inactive_fill_e, "png") == 0) {
                                    draw_png(progress.background_x+x, progress.background_y, progress.background_z, progress.inactive_fill_img);
                                }
                                if(percentage == -1) {
                                    usleep((__useconds_t)(speed*progress.inactive_fill_w));
                                }
                            }
                        }
                        x++;
                    }
                }

                if(x < target_width) {
                    if(nodaemon == 0 && black == 0) {
                        if(progress_active == 0) {
                            if(strcmp(progress.active_end_e, "jpg") == 0) {
                                draw_jpg(progress.background_x+x, progress.background_y, progress.background_z, progress.active_end_img);
                            } else if(strcmp(progress.active_end_e, "png") == 0) {
                                draw_png(progress.background_x+x, progress.background_y, progress.background_z, progress.active_end_img);
                            }
                        } else {
                            if(strcmp(progress.inactive_end_e, "jpg") == 0) {
                                draw_jpg(progress.background_x+x, progress.background_y, progress.background_z, progress.inactive_end_img);
                            } else if(strcmp(progress.inactive_end_e, "png") == 0) {
                                draw_png(progress.background_x+x, progress.background_y, progress.background_z, progress.inactive_end_img);
                            }
                        }
                        x = (int)target_width;
                    }
                }

                if(x == progress.background_w) {
                    progress_active ^= 1;
                    x = 0;
                }

                if(infinite == 0) {
                    if(new_percentage > -1) {
                        percentage = new_percentage;
                        new_percentage = -1;
                        target_width = (int)(progress.background_w * (((double)percentage/100)));
                        x = 0;
                    } else if(percentage < last_percentage && last_percentage != 100) {
                        new_percentage = percentage;
                        target_width = (int)progress.background_w;
                        x = 0;
                    } else {
                        draw_loop = 0;
                    }
                    last_percentage = percentage;
                }
                pthread_mutex_unlock(&progress_lock);
            }
        } else {
            if(percentage > 0 && percentage < 100) {
                struct timeval tp;
                struct timespec ts;
                gettimeofday(&tp, NULL);
                ts.tv_sec = tp.tv_sec;
                ts.tv_nsec = tp.tv_usec * 1000;
                ts.tv_sec += 1;
                n = pthread_cond_timedwait(&progress_signal, &progress_lock, &ts);
                if(n == ETIMEDOUT) {
                    percentage++;
                    draw_loop = 1;
                }
            } else {
                n = pthread_cond_wait(&progress_signal, &progress_lock);
            }
        }
    }
    return (void *)NULL;
}
Example 15: LOC_LOGD
static void *timer_thread(void *thread_data)
{
    int ret = -ETIMEDOUT;
    struct timespec ts;
    struct timeval tv;
    timer_data* t = (timer_data*)thread_data;

    LOC_LOGD("%s:%d]: Enter. Delay = %d\n", __func__, __LINE__, t->time_msec);

    gettimeofday(&tv, NULL);
    clock_gettime(CLOCK_REALTIME, &ts);
    if(t->time_msec >= 1000) {
        ts.tv_sec += t->time_msec/1000;
        t->time_msec = t->time_msec % 1000;
    }
    if(t->time_msec)
        ts.tv_nsec += t->time_msec * 1000000;
    if(ts.tv_nsec > 999999999) {
        LOC_LOGD("%s:%d]: Large nanosecs\n", __func__, __LINE__);
        ts.tv_sec += 1;
        ts.tv_nsec -= 1000000000;
    }
    LOC_LOGD("%s:%d]: ts.tv_sec:%d; ts.tv_nsec:%d\n"
             "\t Current time: %d sec; %d nsec",
             __func__, __LINE__, (int)ts.tv_sec, (int)ts.tv_nsec,
             (int)tv.tv_sec, (int)tv.tv_usec*1000);

    pthread_mutex_lock(&(t->timer_mutex));
    if (READY == t->state) {
        t->state = WAITING;
        ret = pthread_cond_timedwait(&t->timer_cond, &t->timer_mutex, &ts);
        t->state = DONE;
    }
    pthread_mutex_unlock(&(t->timer_mutex));

    switch (ret) {
    case ETIMEDOUT:
        LOC_LOGV("%s:%d]: loc_timer timed out", __func__, __LINE__);
        break;
    case 0:
        LOC_LOGV("%s:%d]: loc_timer stopped", __func__, __LINE__);
        break;
    case -ETIMEDOUT:
        LOC_LOGV("%s:%d]: loc_timer cancelled", __func__, __LINE__);
        break;
    default:
        LOC_LOGE("%s:%d]: Call to pthread_cond_timedwait failed; ret=%d\n",
                 __func__, __LINE__, ret);
        break;
    }

    if(ETIMEDOUT == ret)
        t->callback_func(t->user_data, ret);

    // A (should be rare) race condition is that, when loc_timer_stop is called
    // and has acquired the mutex, we reach here. pthread_mutex_destroy will fail
    // with error code EBUSY. We give it 6 tries in 5 seconds. That should be
    // enough time for loc_timer_stop to complete. With the 7th try, we also
    // perform an unlock prior to the destroy.
    {
        int i;
        for (i = 0; EBUSY == pthread_mutex_destroy(&t->timer_mutex) && i <= 5; i++) {
            if (i < 5) {
                sleep(1);
            } else {
                // nah, forget it, something is seriously wrong. The mutex has
                // been held too long. Unlock the mutex here.
                pthread_mutex_unlock(&t->timer_mutex);
            }
        }
    }
    pthread_cond_destroy(&t->timer_cond);
    free(t);
    LOC_LOGD("%s:%d]: Exit\n", __func__, __LINE__);
    return NULL;
}