This page collects typical C++ usage examples of the pj_mutex_lock function. If you are wondering how pj_mutex_lock is actually used in C++ code, the hand-picked samples below should help.
A total of 15 code examples of pj_mutex_lock are shown, sorted by popularity by default. You can rate the ones you find useful to help surface better C++ samples.
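Before the examples, here is a minimal hedged sketch of the basic pj_mutex_lock()/pj_mutex_unlock() pairing. The pool, mutex name and counter below are illustrative assumptions and do not come from the examples on this page:

#include <pjlib.h>

/* Minimal sketch: protect a shared counter with a PJLIB mutex.
 * 'app_mutex' and 'app_counter' are illustrative names only.
 */
static pj_mutex_t *app_mutex;
static int app_counter;

static pj_status_t app_counter_add(pj_pool_t *pool)
{
    pj_status_t status;

    if (!app_mutex) {
        /* Create a simple (non-recursive) mutex from the given pool. */
        status = pj_mutex_create_simple(pool, "app_mutex", &app_mutex);
        if (status != PJ_SUCCESS)
            return status;
    }

    status = pj_mutex_lock(app_mutex);
    if (status != PJ_SUCCESS)
        return status;

    ++app_counter;                  /* critical section */

    return pj_mutex_unlock(app_mutex);
}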
Example 1: PJ_DEF
/*
* pj_ioqueue_unregister()
*/
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key )
{
unsigned i;
pj_bool_t has_lock;
enum { RETRY = 10 };
PJ_ASSERT_RETURN(key, PJ_EINVAL);
#if PJ_HAS_TCP
if (key->connecting) {
unsigned pos;
pj_ioqueue_t *ioqueue;
ioqueue = key->ioqueue;
/* Erase from connecting_handles */
pj_lock_acquire(ioqueue->lock);
for (pos=0; pos < ioqueue->connecting_count; ++pos) {
if (ioqueue->connecting_keys[pos] == key) {
erase_connecting_socket(ioqueue, pos);
break;
}
}
key->connecting = 0;
pj_lock_release(ioqueue->lock);
}
#endif
#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Mark key as closing before closing handle. */
key->closing = 1;
/* If concurrency is disabled, wait until the key has finished
* processing the callback
*/
if (key->allow_concurrent == PJ_FALSE) {
pj_mutex_lock(key->mutex);
has_lock = PJ_TRUE;
} else {
has_lock = PJ_FALSE;
}
#else
PJ_UNUSED_ARG(has_lock);
#endif
/* Close handle (the only way to disassociate handle from IOCP).
* We also need to close handle to make sure that no further events
* will come to the handle.
*/
/* Update 2008/07/18 (http://trac.pjsip.org/repos/ticket/575):
* - It seems that CloseHandle() in itself does not actually close
* the socket (i.e. it will still appear in "netstat" output). Also
* if we only use CloseHandle(), an "Invalid Handle" exception will
* be raised in WSACleanup().
* - MSDN documentation says that CloseHandle() must be called after
* closesocket() call (see
* http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx).
* But it turns out that this raises an "Invalid Handle" exception
* in debug mode.
* So because of this, we replaced CloseHandle() with closesocket()
* instead. This was tested on WinXP SP2.
*/
//CloseHandle(key->hnd);
pj_sock_close((pj_sock_t)key->hnd);
/* Reset callbacks */
key->cb.on_accept_complete = NULL;
key->cb.on_connect_complete = NULL;
key->cb.on_read_complete = NULL;
key->cb.on_write_complete = NULL;
#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Even after handle is closed, I suspect that IOCP may still try to
* do something with the handle, causing memory corruption when pool
* debugging is enabled.
*
* Forcing context switch seems to have fixed that, but this is quite
* an ugly solution..
*
* Update 2008/02/13:
* This should not happen if concurrency is disallowed for the key.
* So at least application has a solution for this (i.e. by disallowing
* concurrency in the key).
*/
//This would loop forever if unregistration is done from within the callback.
//Bounding the loop with RETRY should give IOCP time to mark the socket
//signalled, without causing a deadlock.
//while (pj_atomic_get(key->ref_count) != 1)
// pj_thread_sleep(0);
for (i=0; pj_atomic_get(key->ref_count) != 1 && i<RETRY; ++i)
pj_thread_sleep(0);
/* Decrement reference counter to destroy the key. */
decrement_counter(key);
if (has_lock)
pj_mutex_unlock(key->mutex);
//......... (part of the code omitted here) .........
Example 2: proxy_on_rx_request
//......... (part of the code omitted here) .........
return PJ_TRUE;
}
/* Feed the request to the UAS transaction to drive its state
* out of NULL state.
*/
pjsip_tsx_recv_msg(uas_tsx, rdata);
/* Attach a data to the UAC transaction, to be used to find the
* UAS transaction when we receive response in the UAC side.
*/
uac_data = (struct uac_data*)
pj_pool_alloc(uac_tsx->pool, sizeof(struct uac_data));
uac_data->uas_tsx = uas_tsx;
uac_tsx->mod_data[mod_tu.id] = (void*)uac_data;
/* Attach data to the UAS transaction, to find the UAC transaction
* when cancelling INVITE request.
*/
uas_data = (struct uas_data*)
pj_pool_alloc(uas_tsx->pool, sizeof(struct uas_data));
uas_data->uac_tsx = uac_tsx;
uas_tsx->mod_data[mod_tu.id] = (void*)uas_data;
/* Everything is setup, forward the request */
status = pjsip_tsx_send_msg(uac_tsx, tdata);
if (status != PJ_SUCCESS) {
pjsip_tx_data *err_res;
/* Failed to send the request for some reason */
/* Destroy transmit data */
pjsip_tx_data_dec_ref(tdata);
/* I think the UAC transaction should have been destroyed when
* it failed to send the request, so there is no need to destroy it here.
pjsip_tsx_terminate(uac_tsx, PJSIP_SC_INTERNAL_SERVER_ERROR);
*/
/* Send 500/Internal Server Error to UAS transaction */
pjsip_endpt_create_response(global.endpt, rdata,
500, NULL, &err_res);
pjsip_tsx_send_msg(uas_tsx, err_res);
return PJ_TRUE;
}
/* Send 100/Trying if this is an INVITE */
if (rdata->msg_info.msg->line.req.method.id == PJSIP_INVITE_METHOD) {
pjsip_tx_data *res100;
pjsip_endpt_create_response(global.endpt, rdata, 100, NULL,
&res100);
pjsip_tsx_send_msg(uas_tsx, res100);
}
} else {
/* This is CANCEL request */
pjsip_transaction *invite_uas;
struct uas_data *uas_data;
pj_str_t key;
/* Find the UAS INVITE transaction */
pjsip_tsx_create_key(rdata->tp_info.pool, &key, PJSIP_UAS_ROLE,
pjsip_get_invite_method(), rdata);
invite_uas = pjsip_tsx_layer_find_tsx(&key, PJ_TRUE);
if (!invite_uas) {
/* Invite transaction not found, respond CANCEL with 481 */
pjsip_endpt_respond_stateless(global.endpt, rdata, 481, NULL,
NULL, NULL);
return PJ_TRUE;
}
/* Respond 200 OK to CANCEL */
pjsip_endpt_respond(global.endpt, NULL, rdata, 200, NULL, NULL,
NULL, NULL);
/* Send CANCEL to cancel the UAC transaction.
* The UAS INVITE transaction will get final response when
* we receive final response from the UAC INVITE transaction.
*/
uas_data = (struct uas_data*) invite_uas->mod_data[mod_tu.id];
if (uas_data->uac_tsx && uas_data->uac_tsx->status_code < 200) {
pjsip_tx_data *cancel;
pj_mutex_lock(uas_data->uac_tsx->mutex);
pjsip_endpt_create_cancel(global.endpt, uas_data->uac_tsx->last_tx,
&cancel);
pjsip_endpt_send_request(global.endpt, cancel, -1, NULL, NULL);
pj_mutex_unlock(uas_data->uac_tsx->mutex);
}
/* Unlock UAS tsx because it is locked in find_tsx() */
pj_mutex_unlock(invite_uas->mutex);
}
return PJ_TRUE;
}
Example 3: PJ_DEF
/*
* pj_ioqueue_recvfrom()
*
* Start asynchronous recvfrom() from the socket.
*/
PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
void *buffer,
pj_ssize_t *length,
unsigned flags,
pj_sockaddr_t *addr,
int *addrlen)
{
struct read_operation *read_op;
PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
PJ_CHECK_STACK();
/* Check if key is closing. */
if (IS_CLOSING(key))
return PJ_ECANCELLED;
read_op = (struct read_operation*)op_key;
read_op->op = PJ_IOQUEUE_OP_NONE;
/* Try to see if there's data immediately available.
*/
if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
pj_status_t status;
pj_ssize_t size;
size = *length;
status = pj_sock_recvfrom(key->fd, buffer, &size, flags,
addr, addrlen);
if (status == PJ_SUCCESS) {
/* Yes! Data is available! */
*length = size;
return PJ_SUCCESS;
} else {
/* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
* the error to caller.
*/
if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
return status;
}
}
flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);
/*
* No data is immediately available.
* Must schedule asynchronous operation to the ioqueue.
*/
read_op->op = PJ_IOQUEUE_OP_RECV_FROM;
read_op->buf = buffer;
read_op->size = *length;
read_op->flags = flags;
read_op->rmt_addr = addr;
read_op->rmt_addrlen = addrlen;
pj_mutex_lock(key->mutex);
pj_list_insert_before(&key->read_list, read_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
pj_mutex_unlock(key->mutex);
return PJ_EPENDING;
}
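The recvfrom example above tries a synchronous read first and only schedules an asynchronous operation (under the key's mutex) if no data is available, so the caller must handle both PJ_SUCCESS and PJ_EPENDING. A caller-side sketch, assuming the key and op_key were registered elsewhere; the buffer size and function names are illustrative:

/* Sketch only: start an asynchronous recvfrom and handle both outcomes. */
static char rx_buf[1500];
static pj_sockaddr_in rx_addr;
static int rx_addrlen = sizeof(rx_addr);

static void start_read(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key)
{
    pj_ssize_t len = sizeof(rx_buf);
    pj_status_t status;

    status = pj_ioqueue_recvfrom(key, op_key, rx_buf, &len, 0,
                                 &rx_addr, &rx_addrlen);
    if (status == PJ_SUCCESS) {
        /* Data was immediately available; 'len' bytes are in rx_buf. */
    } else if (status == PJ_EPENDING) {
        /* The on_read_complete() callback registered with the key will
         * be invoked later with the result. */
    } else {
        /* Genuine error; report it to the application. */
    }
}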
Example 4: ioqueue_dispatch_write_event
/*
* ioqueue_dispatch_event()
*
* Report occurrence of an event in the key to be processed by the
* framework.
*/
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
/* Lock the key. */
pj_mutex_lock(h->mutex);
if (IS_CLOSING(h)) {
pj_mutex_unlock(h->mutex);
return;
}
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
if (h->connecting) {
/* Completion of connect() operation */
pj_ssize_t bytes_transfered;
pj_bool_t has_lock;
/* Clear operation. */
h->connecting = 0;
ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);
#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
/* from connect(2):
* On Linux, use getsockopt to read the SO_ERROR option at
* level SOL_SOCKET to determine whether connect() completed
* successfully (if SO_ERROR is zero).
*/
{
int value;
int vallen = sizeof(value);
int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
&value, &vallen);
if (gs_rc != 0) {
/* Argh!! What to do now???
* Just indicate that the socket is connected. The
* application will get error as soon as it tries to use
* the socket to send/receive.
*/
bytes_transfered = 0;
} else {
bytes_transfered = value;
}
}
#elif defined(PJ_WIN32) && PJ_WIN32!=0
bytes_transfered = 0; /* success */
#else
/* Excellent information in D.J. Bernstein page:
* http://cr.yp.to/docs/connect.html
*
* Seems like the most portable way of detecting connect()
* failure is to call getpeername(). If socket is connected,
* getpeername() will return 0. If the socket is not connected,
* it will return ENOTCONN, and read(fd, &ch, 1) will produce
* the right errno through error slippage. This is a combination
* of suggestions from Douglas C. Schmidt and Ken Keys.
*/
{
int gp_rc;
struct sockaddr_in addr;
socklen_t addrlen = sizeof(addr);
gp_rc = getpeername(h->fd, (struct sockaddr*)&addr, &addrlen);
bytes_transfered = (gp_rc < 0) ? gp_rc : -gp_rc;
}
#endif
/* Unlock; from this point on we no longer need to hold the key's mutex
* (unless concurrency is disabled, in which case we must keep holding
* the mutex while calling the callback). */
if (h->allow_concurrent) {
/* concurrency may be changed while we're in the callback, so
* save it to a flag.
*/
has_lock = PJ_FALSE;
pj_mutex_unlock(h->mutex);
} else {
has_lock = PJ_TRUE;
}
/* Call callback. */
if (h->cb.on_connect_complete && !IS_CLOSING(h))
(*h->cb.on_connect_complete)(h, bytes_transfered);
/* Unlock if we still hold the lock */
if (has_lock) {
pj_mutex_unlock(h->mutex);
}
/* Done. */
} else
#endif /* PJ_HAS_TCP */
//......... (part of the code omitted here) .........
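The allow_concurrent branch in the example above shows a recurring pattern in this code base: release the key's mutex before invoking user callbacks (so callbacks may run in parallel and cannot deadlock on the key), unless per-key concurrency is disabled. A condensed sketch of that pattern; the fields mirror the internal pj_ioqueue_key_t used above, and the callback invocation is illustrative:

/* Sketch of "unlock before callback unless concurrency is disabled". */
static void dispatch_read_event(pj_ioqueue_key_t *h, pj_ssize_t bytes_read)
{
    pj_bool_t has_lock;

    pj_mutex_lock(h->mutex);
    /* ... pop/clear the pending operation while holding the mutex ... */

    if (h->allow_concurrent) {
        has_lock = PJ_FALSE;
        pj_mutex_unlock(h->mutex);   /* callback may run concurrently */
    } else {
        has_lock = PJ_TRUE;          /* keep the key serialized */
    }

    if (h->cb.on_read_complete && !IS_CLOSING(h))
        (*h->cb.on_read_complete)(h, NULL, bytes_read);

    if (has_lock)
        pj_mutex_unlock(h->mutex);
}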
Example 5: open_openh264_codec
static pj_status_t open_openh264_codec(openh264_private *ff,
pj_mutex_t *ff_mutex)
{
pjmedia_video_format_detail *vfd;
pj_bool_t enc_opened = PJ_FALSE, dec_opened = PJ_FALSE;
pj_status_t status;
vfd = pjmedia_format_get_video_format_detail(&ff->param.enc_fmt,
PJ_TRUE);
/* Override generic params or apply specific params before opening
* the codec.
*/
if (ff->desc->preopen) {
status = (*ff->desc->preopen)(ff);
if (status != PJ_SUCCESS)
goto on_error;
}
/* Open encoder */
if (ff->param.dir & PJMEDIA_DIR_ENCODING) {
int err;
SEncParamExt *param = &ff->enc_param;
const openh264_codec_desc *desc = &ff->desc[0];
bool disable = 0;
int iIndexLayer = 0;
SSourcePicture *srcPic;
pj_mutex_lock(ff_mutex);
memset(param, 0x00, sizeof(SEncParamExt));
CreateSVCEncoder(&ff->enc);
/* Test for temporal, spatial, SNR scalability */
param->fMaxFrameRate = (float)vfd->fps.num; // input frame rate
param->iPicWidth = vfd->size.w; // width of picture in samples
param->iPicHeight = vfd->size.h; // height of picture in samples
param->iTargetBitrate = desc->avg_bps; // target bitrate desired
param->bEnableRc = PJ_TRUE; // rc mode control
param->iTemporalLayerNum = 3; // layer number at temporal level
param->iSpatialLayerNum = 1; // layer number at spatial level
param->bEnableDenoise = PJ_TRUE; // denoise control
param->bEnableBackgroundDetection = PJ_TRUE; // background detection control
param->bEnableAdaptiveQuant = PJ_TRUE; // adaptive quantization control
param->bEnableFrameSkip = PJ_TRUE; // frame skipping
param->bEnableLongTermReference = PJ_FALSE; // long term reference control
param->bEnableFrameCroppingFlag = PJ_FALSE;
param->iLoopFilterDisableIdc = 0;
param->iInputCsp = videoFormatI420; // color space of input sequence
param->uiIntraPeriod = 300; // period of Intra frame
param->bEnableSpsPpsIdAddition = 0;
param->bPrefixNalAddingCtrl = 0;
param->sSpatialLayers[iIndexLayer].iVideoWidth = vfd->size.w;
param->sSpatialLayers[iIndexLayer].iVideoHeight = vfd->size.h;
param->sSpatialLayers[iIndexLayer].fFrameRate = (float)vfd->fps.num;
param->sSpatialLayers[iIndexLayer].iSpatialBitrate = desc->avg_bps;
// param->sSpatialLayers[iIndexLayer].iDLayerQp = 50;
param->sSpatialLayers[iIndexLayer].uiProfileIdc = 66;
param->sSpatialLayers[iIndexLayer].sSliceCfg.uiSliceMode = 4;
param->sSpatialLayers[iIndexLayer].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = PJMEDIA_MAX_VID_PAYLOAD_SIZE;
err = callWelsEncoderFn(ff->enc)->InitializeExt(ff->enc, param);
if (err == cmResultSuccess)
{
callWelsEncoderFn(ff->enc)->SetOption(ff->enc, ENCODER_OPTION_ENABLE_SSEI, &disable);
enc_opened = PJ_TRUE;
}
srcPic = malloc(sizeof(SSourcePicture));
memset(srcPic, 0x00, sizeof(SSourcePicture));
srcPic->iColorFormat = param->iInputCsp;
srcPic->iPicWidth = param->iPicWidth;
srcPic->iPicHeight = param->iPicHeight;
srcPic->iStride[0] = param->iPicWidth;
srcPic->iStride[1] = param->iPicWidth / 2;
srcPic->iStride[2] = param->iPicWidth / 2;
ff->srcPic = srcPic;
pj_mutex_unlock(ff_mutex);
}
/* Open decoder */
if (ff->param.dir & PJMEDIA_DIR_DECODING) {
SDecodingParam sDecParam = {0};
pj_mutex_lock(ff_mutex);
CreateDecoder(&ff->dec);
sDecParam.iOutputColorFormat = videoFormatI420;
sDecParam.uiTargetDqLayer = (unsigned char)-1;
sDecParam.uiEcActiveFlag = 1;
sDecParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_DEFAULT;
callWelsDecoderFn(ff->dec)->Initialize(ff->dec, &sDecParam);
pj_mutex_unlock(ff_mutex);
dec_opened = PJ_TRUE;
}
/* Let the codec apply specific params after the codec opened */
//......... (part of the code omitted here) .........
Example 6: PJ_DEF
/*
* Get default codec parameter.
*/
PJ_DEF(pj_status_t) pjmedia_codec_mgr_get_default_param( pjmedia_codec_mgr *mgr,
const pjmedia_codec_info *info,
pjmedia_codec_param *param )
{
pjmedia_codec_factory *factory;
pj_status_t status;
pjmedia_codec_id codec_id;
struct pjmedia_codec_desc *codec_desc = NULL;
unsigned i;
PJ_ASSERT_RETURN(mgr && info && param, PJ_EINVAL);
if (!pjmedia_codec_info_to_id(info, (char*)&codec_id, sizeof(codec_id)))
return PJ_EINVAL;
pj_mutex_lock(mgr->mutex);
/* First, lookup default param in codec desc */
for (i=0; i < mgr->codec_cnt; ++i) {
if (pj_ansi_stricmp(codec_id, mgr->codec_desc[i].id) == 0) {
codec_desc = &mgr->codec_desc[i];
break;
}
}
/* If we found the codec and its default param is set, return it */
if (codec_desc && codec_desc->param) {
pj_assert(codec_desc->param->param);
pj_memcpy(param, codec_desc->param->param,
sizeof(pjmedia_codec_param));
pj_mutex_unlock(mgr->mutex);
return PJ_SUCCESS;
}
/* Otherwise query the default param from codec factory */
factory = mgr->factory_list.next;
while (factory != &mgr->factory_list) {
if ( (*factory->op->test_alloc)(factory, info) == PJ_SUCCESS ) {
status = (*factory->op->default_attr)(factory, info, param);
if (status == PJ_SUCCESS) {
/* Check for invalid max_bps. */
if (param->info.max_bps < param->info.avg_bps)
param->info.max_bps = param->info.avg_bps;
pj_mutex_unlock(mgr->mutex);
return PJ_SUCCESS;
}
}
factory = factory->next;
}
pj_mutex_unlock(mgr->mutex);
return PJMEDIA_CODEC_EUNSUP;
}
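A small caller-side sketch for the function above. The lookup helper pjmedia_codec_mgr_find_codecs_by_id() is part of the same codec manager API; the codec id string and variable names are illustrative assumptions:

/* Sketch: fetch the default parameters of the first codec matching an id. */
static pj_status_t get_codec_defaults(pjmedia_codec_mgr *mgr,
                                      pjmedia_codec_param *param)
{
    const pjmedia_codec_info *info[1];
    unsigned count = 1;
    char idbuf[] = "speex/8000";            /* illustrative codec id */
    pj_str_t codec_id = pj_str(idbuf);
    pj_status_t status;

    status = pjmedia_codec_mgr_find_codecs_by_id(mgr, &codec_id,
                                                 &count, info, NULL);
    if (status != PJ_SUCCESS || count == 0)
        return PJMEDIA_CODEC_EUNSUP;

    /* This call takes mgr->mutex internally, as shown in the example. */
    return pjmedia_codec_mgr_get_default_param(mgr, info[0], param);
}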
Example 7: printf
int CChannel::sendto(const sockaddr* addr, CPacket& packet) const
{
// convert control information into network order
if (packet.getFlag()) {
for (int i = 0, n = packet.getLength() / 4; i < n; ++ i)
*((uint32_t *)packet.m_pcData + i) = htonl(*((uint32_t *)packet.m_pcData + i));
}
uint32_t* p = packet.m_nHeader;
for (int j = 0; j < 4; ++ j)
{
*p = htonl(*p);
++ p;
}
#ifdef DEBUGP
//dump ctrl packet
printf("\nSend Header:\n");
dumpHex((char *)packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len);
char *bb = (char *)packet.m_PacketVector[0].iov_base;
if(bb[0]&0x80) {
printf("Data:\n");
dumpHex((char *)packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len);
printf("================\n");
}
#endif
int res = -1;
unsigned size;
unsigned len;
natnl_hdr hdr = {0xff, 0x00, 0x0000};
int is_tnl_data = 0;
pj_thread_desc desc;
pj_thread_t *thread = 0;
if(m_iSocket == -1) {
pjsua_call *call = (pjsua_call *)m_call;
if(call == NULL) return -1;
// DEAN, prevent an assertion failure when the garbage collector removes the UDT socket on multiple instances.
if (!pj_thread_is_registered(call->inst_id)) {
int status = pj_thread_register(call->inst_id, "CChannel::sendto", desc, &thread );
if (status != PJ_SUCCESS)
return -1;
}
pj_mutex_lock(call->tnl_stream_lock2);
natnl_stream *stream = (natnl_stream *)call->tnl_stream;
if(stream == NULL) {
pj_mutex_unlock(call->tnl_stream_lock2);
return -1;
}
size = CPacket::m_iPktHdrSize + packet.getLength() + sizeof(natnl_hdr);
len = (CPacket::m_iPktHdrSize + packet.getLength());
hdr.length = htons(len);
memcpy((char *)&m_pktBuffer[sizeof(natnl_hdr)], packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len);
memcpy((char *)&m_pktBuffer[packet.m_PacketVector[0].iov_len+sizeof(natnl_hdr)], packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len);
memcpy((char *)&m_pktBuffer[0], &hdr, sizeof(natnl_hdr));
resend:
// DEAN, check if this is tunnel data. If true, update last_data time.
is_tnl_data = pjmedia_natnl_udt_packet_is_tnl_data(&m_pktBuffer[0], size);
pj_assert(size < sizeof(m_pktBuffer));
((pj_uint8_t*)m_pktBuffer)[size] = 0; // tunnel data flag off
if (is_tnl_data) {
pj_get_timestamp(&stream->last_data); // DEAN save current time
((pj_uint8_t*)m_pktBuffer)[size] = 1; // tunnel data flag on
}
res = pjmedia_transport_send_rtp(stream->med_tp, m_pktBuffer, size); // +Roger modified - stream pointer to med_tp
#if 0 // No need to resend it, because UDT will handle this.
if(res == 70011) { //EAGAIN
m_pTimer->sleepto(50000); //sleep for 50 us
goto resend;
}
#endif
pj_mutex_unlock(call->tnl_stream_lock2);
}
res = (0 == res) ? size : -1;
// convert back into local host order
//for (int k = 0; k < 4; ++ k)
// packet.m_nHeader[k] = ntohl(packet.m_nHeader[k]);
p = packet.m_nHeader;
for (int k = 0; k < 4; ++ k)
{
*p = ntohl(*p);
++ p;
}
if (packet.getFlag())
{
for (int l = 0, n = packet.getLength() / 4; l < n; ++ l)
//......... (part of the code omitted here) .........
Example 8: pj_thread_register
int CChannel::recvfrom(sockaddr* addr, CPacket& packet) const
{
int res = -1;
recv_buff *rb = NULL;
pj_thread_desc desc;
pj_thread_t *thread = 0;
if (m_iSocket == -1) {
pjsua_call *call = (pjsua_call *)m_call;
if(call == NULL)
return -1;
if(call->tnl_stream==NULL)
return -1;
// DEAN, prevent an assertion failure when the garbage collector removes the UDT socket on multiple instances.
if (!pj_thread_is_registered(call->inst_id)) {
int status = pj_thread_register(call->inst_id, "CChannel::recvfrom", desc, &thread );
if (status != PJ_SUCCESS)
return -1;
}
pj_mutex_lock(call->tnl_stream_lock3);
natnl_stream *stream = (natnl_stream *)call->tnl_stream;
//get data from rBuff
if (stream == NULL) {
pj_mutex_unlock(call->tnl_stream_lock3);
return -1;
}
// charles CHARLES
// DEAN commented out, in order to use pj_sem_trywait2
//pj_mutex_unlock(call->tnl_stream_lock3);
//pj_sem_wait(stream->rbuff_sem);
pj_sem_trywait2(stream->rbuff_sem);
//pj_mutex_lock(call->tnl_stream_lock3);
pj_mutex_lock(stream->rbuff_mutex);
if (!pj_list_empty(&stream->rbuff)) {
rb = stream->rbuff.next;
stream->rbuff_cnt--;
//PJ_LOG(4, ("channel.cpp", "rbuff_cnt=%d", stream->rbuff_cnt));
pj_list_erase(rb);
/*if (rb->len > 0 &&
((pj_uint32_t *)rb->buff)[0] == NO_CTL_SESS_MGR_HEADER_MAGIC) { // check the magic
char *data = (char *)&rb->buff[sizeof(NO_CTL_SESS_MGR_HEADER_MAGIC)];
int len = rb->len - sizeof(NO_CTL_SESS_MGR_HEADER_MAGIC);
natnl_handle_recv_msg(call->index, call->tnl_stream->med_tp, data, len);
} else */if (!check_packet_integrity(rb)) {
int ds = UMIN(packet.m_PacketVector[1].iov_len, rb->len - sizeof(natnl_hdr) - CPacket::m_iPktHdrSize);
memcpy(packet.m_PacketVector[0].iov_base, &rb->buff[sizeof(natnl_hdr)], packet.m_PacketVector[0].iov_len);
memcpy(packet.m_PacketVector[1].iov_base, &rb->buff[packet.m_PacketVector[0].iov_len+sizeof(natnl_hdr)], ds);
res = rb->len - sizeof(natnl_hdr);
}
}
pj_mutex_unlock(stream->rbuff_mutex);
if (rb != NULL) {
#if 1
//move rb to gcbuff
pj_mutex_lock(stream->gcbuff_mutex);
pj_list_push_back(&stream->gcbuff, rb);
pj_mutex_unlock(stream->gcbuff_mutex);
#else
free(rb);
rb = NULL;
#endif
}
pj_mutex_unlock(call->tnl_stream_lock3);
}
if (res <= 0)
{
packet.setLength(-1);
return -1;
}
packet.setLength(res - CPacket::m_iPktHdrSize);
#ifdef DEBUGP
printf("\nRecv Header:\n");
dumpHex((char *)packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len);
char *bb = (char *)packet.m_PacketVector[0].iov_base;
if(bb[0]&0x80) {
printf("Data:\n");
dumpHex((char *)packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len);
printf("================\n");
}
#endif
// convert back into local host order
//for (int i = 0; i < 4; ++ i)
// packet.m_nHeader[i] = ntohl(packet.m_nHeader[i]);
uint32_t* p = packet.m_nHeader;
for (int i = 0; i < 4; ++ i)
{
*p = ntohl(*p);
++ p;
}
//......... (part of the code omitted here) .........
Example 9: on_request_complete
/*
* Callback upon request completion.
*/
static void on_request_complete(pj_stun_session *stun_sess,
pj_status_t status,
void *token,
pj_stun_tx_data *tdata,
const pj_stun_msg *response,
const pj_sockaddr_t *src_addr,
unsigned src_addr_len)
{
nat_detect_session *sess;
pj_stun_sockaddr_attr *mattr = NULL;
pj_stun_changed_addr_attr *ca = NULL;
pj_uint32_t *tsx_id;
int cmp;
unsigned test_id;
PJ_UNUSED_ARG(token);
PJ_UNUSED_ARG(tdata);
PJ_UNUSED_ARG(src_addr);
PJ_UNUSED_ARG(src_addr_len);
sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess);
pj_mutex_lock(sess->mutex);
/* Find errors in the response */
if (status == PJ_SUCCESS) {
/* Check error message */
if (PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
pj_stun_errcode_attr *eattr;
int err_code;
eattr = (pj_stun_errcode_attr*)
pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0);
if (eattr != NULL)
err_code = eattr->err_code;
else
err_code = PJ_STUN_SC_SERVER_ERROR;
status = PJ_STATUS_FROM_STUN_CODE(err_code);
} else {
/* Get MAPPED-ADDRESS or XOR-MAPPED-ADDRESS */
mattr = (pj_stun_sockaddr_attr*)
pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0);
if (mattr == NULL) {
mattr = (pj_stun_sockaddr_attr*)
pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR, 0);
}
if (mattr == NULL) {
status = PJNATH_ESTUNNOMAPPEDADDR;
}
/* Get CHANGED-ADDRESS attribute */
ca = (pj_stun_changed_addr_attr*)
pj_stun_msg_find_attr(response, PJ_STUN_ATTR_CHANGED_ADDR, 0);
if (ca == NULL) {
status = PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR);
}
}
}
/* Save the result */
tsx_id = (pj_uint32_t*) tdata->msg->hdr.tsx_id;
test_id = tsx_id[2];
if (test_id >= ST_MAX) {
PJ_LOG(4,(sess->pool->obj_name, "Invalid transaction ID %u in response",
test_id));
end_session(sess, PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR),
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
goto on_return;
}
PJ_LOG(5,(sess->pool->obj_name, "Completed %s, status=%d",
test_names[test_id], status));
sess->result[test_id].complete = PJ_TRUE;
sess->result[test_id].status = status;
if (status == PJ_SUCCESS) {
pj_memcpy(&sess->result[test_id].ma, &mattr->sockaddr.ipv4,
sizeof(pj_sockaddr_in));
pj_memcpy(&sess->result[test_id].ca, &ca->sockaddr.ipv4,
sizeof(pj_sockaddr_in));
}
/* Send Test 1B only when Test 2 completes. Must not send Test 1B
* before Test 2 completes to avoid creating mapping on the NAT.
*/
if (!sess->result[ST_TEST_1B].executed &&
sess->result[ST_TEST_2].complete &&
//......... (part of the code omitted here) .........
Example 10: tsx_callback
//......... (part of the code omitted here) .........
if (pubc->auto_refresh && expires)
expiration = expires->ivalue;
if (pubc->auto_refresh && expiration!=0 && expiration!=0xFFFF) {
pj_time_val delay = { 0, 0};
/* Cancel existing timer, if any */
if (pubc->timer.id != 0) {
pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
pubc->timer.id = 0;
}
delay.sec = expiration - DELAY_BEFORE_REFRESH;
if (pubc->expires != PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED &&
delay.sec > (pj_int32_t)pubc->expires)
{
delay.sec = pubc->expires;
}
if (delay.sec < DELAY_BEFORE_REFRESH)
delay.sec = DELAY_BEFORE_REFRESH;
pubc->timer.cb = &pubc_refresh_timer_cb;
pubc->timer.id = REFRESH_TIMER;
pubc->timer.user_data = pubc;
pjsip_endpt_schedule_timer( pubc->endpt, &pubc->timer, &delay);
pj_gettimeofday(&pubc->last_refresh);
pubc->next_refresh = pubc->last_refresh;
pubc->next_refresh.sec += delay.sec;
}
} else {
rdata = (event->body.tsx_state.type==PJSIP_EVENT_RX_MSG) ?
event->body.tsx_state.src.rdata : NULL;
}
/* Call callback. */
if (expiration == 0xFFFF) expiration = -1;
/* Temporarily increment pending_tsx to prevent callback from
* destroying pubc.
*/
++pubc->pending_tsx;
call_callback(pubc, PJ_SUCCESS, tsx->status_code,
(rdata ? &rdata->msg_info.msg->line.status.reason
: pjsip_get_status_text(tsx->status_code)),
rdata, expiration);
--pubc->pending_tsx;
/* If we have pending request(s), send them now */
pj_mutex_lock(pubc->mutex);
while (!pj_list_empty(&pubc->pending_reqs)) {
pjsip_tx_data *tdata = pubc->pending_reqs.next;
pj_list_erase(tdata);
/* Add SIP-If-Match if we have etag and the request doesn't have
* one (http://trac.pjsip.org/repos/ticket/996)
*/
if (pubc->etag.slen) {
const pj_str_t STR_HNAME = { "SIP-If-Match", 12 };
pjsip_generic_string_hdr *sim_hdr;
sim_hdr = (pjsip_generic_string_hdr*)
pjsip_msg_find_hdr_by_name(tdata->msg, &STR_HNAME, NULL);
if (!sim_hdr) {
/* Create the header */
sim_hdr = pjsip_generic_string_hdr_create(tdata->pool,
&STR_HNAME,
&pubc->etag);
pjsip_msg_add_hdr(tdata->msg, (pjsip_hdr*)sim_hdr);
} else {
/* Update */
if (pj_strcmp(&pubc->etag, &sim_hdr->hvalue))
pj_strdup(tdata->pool, &sim_hdr->hvalue, &pubc->etag);
}
}
status = pjsip_publishc_send(pubc, tdata);
if (status == PJ_EPENDING) {
pj_assert(!"Not expected");
pj_list_erase(tdata);
pjsip_tx_data_dec_ref(tdata);
} else if (status == PJ_SUCCESS) {
break;
}
}
pj_mutex_unlock(pubc->mutex);
}
/* No longer in callback. */
--pubc->in_callback;
/* Delete the record if user destroy pubc during the callback. */
if (pubc->_delete_flag && pubc->pending_tsx==0) {
pjsip_publishc_destroy(pubc);
}
}
Example 11: zrtp_synchEnter
static void zrtp_synchEnter(ZrtpContext* ctx)
{
struct tp_zrtp *zrtp = (struct tp_zrtp*)ctx->userData;
pj_mutex_lock(zrtp->zrtpMutex);
}
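Example 11 is the ZRTP glue's "enter critical section" hook; the matching "leave" hook presumably releases the same mutex. A sketch of that counterpart, inferred from the enter function above rather than taken from this page:

static void zrtp_synchLeave(ZrtpContext* ctx)
{
    struct tp_zrtp *zrtp = (struct tp_zrtp*)ctx->userData;
    pj_mutex_unlock(zrtp->zrtpMutex);
}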
Example 12: g729_alloc_codec
static pj_status_t g729_alloc_codec( pjmedia_codec_factory *factory,
const pjmedia_codec_info *id,
pjmedia_codec **p_codec)
{
pjmedia_codec *codec = NULL;
pj_status_t status;
pj_pool_t *pool;
PJ_ASSERT_RETURN(factory && id && p_codec, PJ_EINVAL);
PJ_ASSERT_RETURN(factory==&g729_factory.base, PJ_EINVAL);
/* Lock mutex. */
pj_mutex_lock(g729_factory.mutex);
/* Allocate new codec if no more is available */
struct g729_private *codec_priv;
/* Create pool for codec instance */
pool = pjmedia_endpt_create_pool(g729_factory.endpt, "g729codec", 512, 512);
codec = PJ_POOL_ALLOC_T(pool, pjmedia_codec);
codec_priv = PJ_POOL_ZALLOC_T(pool, struct g729_private);
if (!codec || !codec_priv) {
pj_pool_release(pool);
pj_mutex_unlock(g729_factory.mutex);
return PJ_ENOMEM;
}
codec_priv->pool = pool;
/* Set the payload type */
codec_priv->pt = id->pt;
#if !PLC_DISABLED
/* Create PLC, always with 10ms ptime */
status = pjmedia_plc_create(pool, 8000, 80, 0, &codec_priv->plc);
if (status != PJ_SUCCESS) {
pj_pool_release(pool);
pj_mutex_unlock(g729_factory.mutex);
return status;
}
#endif
/* Create VAD */
status = pjmedia_silence_det_create(g729_factory.pool,
8000, 80,
&codec_priv->vad);
if (status != PJ_SUCCESS) {
pj_mutex_unlock(g729_factory.mutex);
return status;
}
codec->factory = factory;
codec->op = &g729_op;
codec->codec_data = codec_priv;
*p_codec = codec;
/* Unlock mutex. */
pj_mutex_unlock(g729_factory.mutex);
return PJ_SUCCESS;
}
Example 13: increment_counter
/* Increment key's reference counter */
static void increment_counter(pj_ioqueue_key_t *key)
{
pj_mutex_lock(key->ioqueue->ref_cnt_mutex);
++key->ref_count;
pj_mutex_unlock(key->ioqueue->ref_cnt_mutex);
}
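Example 1 calls a decrement_counter() counterpart of the helper above. A hedged reconstruction of the mutex-protected variant is shown below; the cleanup performed when the count reaches zero is backend-specific and only indicated by a placeholder comment:

/* Sketch: decrement the key's reference counter under the same mutex. */
static void decrement_counter(pj_ioqueue_key_t *key)
{
    pj_bool_t destroy = PJ_FALSE;

    pj_mutex_lock(key->ioqueue->ref_cnt_mutex);
    if (--key->ref_count == 0)
        destroy = PJ_TRUE;
    pj_mutex_unlock(key->ioqueue->ref_cnt_mutex);

    if (destroy) {
        /* ... backend-specific cleanup: unregister the handle and
         * return the key to the ioqueue's free list ... */
    }
}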
Example 14: PJ_DEF
/*
* pj_ioqueue_recv()
*
* Start asynchronous recv() from the socket.
*/
PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
void *buffer,
pj_ssize_t *length,
unsigned flags )
{
struct read_operation *read_op;
PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
PJ_CHECK_STACK();
/* Check if key is closing (need to do this first before accessing
* other variables, since they might have been destroyed. See ticket
* #469).
*/
if (IS_CLOSING(key))
return PJ_ECANCELLED;
read_op = (struct read_operation*)op_key;
read_op->op = PJ_IOQUEUE_OP_NONE;
/* Try to see if there's data immediately available.
*/
if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
pj_status_t status;
pj_ssize_t size;
size = *length;
status = pj_sock_recv(key->fd, buffer, &size, flags);
if (status == PJ_SUCCESS) {
/* Yes! Data is available! */
*length = size;
return PJ_SUCCESS;
} else {
/* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
* the error to caller.
*/
if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
return status;
}
}
flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);
/*
* No data is immediately available.
* Must schedule asynchronous operation to the ioqueue.
*/
read_op->op = PJ_IOQUEUE_OP_RECV;
read_op->buf = buffer;
read_op->size = *length;
read_op->flags = flags;
pj_mutex_lock(key->mutex);
/* Check again. The handle may have been closed after the previous check
* in a multithreaded app. If we added a bad handle to the set it would
* corrupt the ioqueue set. See ticket #913
*/
if (IS_CLOSING(key)) {
pj_mutex_unlock(key->mutex);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->read_list, read_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
pj_mutex_unlock(key->mutex);
return PJ_EPENDING;
}
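The re-check of IS_CLOSING() after taking the mutex (ticket #913 above) is a general pattern: any state inspected before acquiring the lock must be validated again once the lock is held. A condensed sketch of that pattern, using an illustrative 'resource' type that is not part of PJLIB:

struct resource {
    pj_mutex_t *mutex;
    pj_bool_t   closing;
};

static pj_status_t schedule_on(struct resource *res)
{
    if (res->closing)                    /* cheap unlocked early-out */
        return PJ_ECANCELLED;

    pj_mutex_lock(res->mutex);
    if (res->closing) {                  /* state may have changed */
        pj_mutex_unlock(res->mutex);
        return PJ_ECANCELLED;
    }
    /* ... safe to queue work on the resource here ... */
    pj_mutex_unlock(res->mutex);
    return PJ_EPENDING;
}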
Example 15: PJ_DEF
PJ_DEF(void) pj_enter_critical_section(void)
{
#if PJ_HAS_THREADS
pj_mutex_lock(&critical_section);
#endif
}