This article collects typical usage examples of the PJ_STATUS_FROM_OS function in C/C++ (an error-code conversion macro from the PJLIB/PJSIP library). If you are unsure what exactly PJ_STATUS_FROM_OS does, how to use it, or want to see it in real code, the hand-picked examples below should help.
A total of 15 code examples of PJ_STATUS_FROM_OS are shown below, ordered by popularity by default.
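Before the examples, a minimal sketch of what the macro does: PJ_STATUS_FROM_OS() folds a native error code (errno on POSIX, GetLastError()/WSAGetLastError() on Windows) into the pj_status_t space, so it can be returned, compared, and decoded with pj_strerror() like any other PJLIB status. The helper below is hypothetical (not part of PJLIB) and assumes a POSIX build where the failing call sets errno.
#include <pjlib.h>
#include <errno.h>
/* Hypothetical helper: wrap the current errno into a pj_status_t and log it. */
static pj_status_t report_os_failure(const char *what)
{
    pj_status_t status = PJ_STATUS_FROM_OS(errno);   /* errno -> pj_status_t */
    char errmsg[PJ_ERR_MSG_SIZE];
    /* pj_strerror() understands values produced by PJ_STATUS_FROM_OS(). */
    pj_strerror(status, errmsg, sizeof(errmsg));
    PJ_LOG(3, ("sample", "%s failed: %s", what, errmsg));
    return status;
}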
Example 1: on_read_complete
/*
* Callback upon receiving packet from network.
*/
static void on_read_complete(pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
pj_ssize_t bytes_read)
{
nat_detect_session *sess;
pj_status_t status;
sess = (nat_detect_session *) pj_ioqueue_get_user_data(key);
pj_assert(sess != NULL);
pj_grp_lock_acquire(sess->grp_lock);
/* Ignore packet when STUN session has been destroyed */
if (!sess->stun_sess)
goto on_return;
if (bytes_read < 0) {
if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
-bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
-bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
{
/* Permanent error */
end_session(sess, (pj_status_t)-bytes_read,
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
goto on_return;
}
} else if (bytes_read > 0) {
pj_stun_session_on_rx_pkt(sess->stun_sess, sess->rx_pkt, bytes_read,
PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET,
NULL, NULL,
&sess->src_addr, sess->src_addr_len);
}
sess->rx_pkt_len = sizeof(sess->rx_pkt);
sess->src_addr_len = sizeof(sess->src_addr);
status = pj_ioqueue_recvfrom(key, op_key, sess->rx_pkt, &sess->rx_pkt_len,
PJ_IOQUEUE_ALWAYS_ASYNC,
&sess->src_addr, &sess->src_addr_len);
if (status != PJ_EPENDING) {
pj_assert(status != PJ_SUCCESS);
end_session(sess, status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
}
on_return:
pj_grp_lock_release(sess->grp_lock);
}
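A note on the OSERR_* names used above: they are not public PJLIB symbols but per-file convenience macros that the PJNATH/PJLIB sources define near the top of the file. A hedged sketch of the usual definitions follows (the exact list varies per file; treat it as an assumption, with <errno.h> or winsock2.h providing the underlying constants):
#if defined(PJ_WIN32) && PJ_WIN32 != 0
#  define OSERR_EWOULDBLOCK    WSAEWOULDBLOCK
#  define OSERR_EINPROGRESS    WSAEINPROGRESS
#  define OSERR_ECONNRESET     WSAECONNRESET
#else
#  define OSERR_EWOULDBLOCK    EWOULDBLOCK
#  define OSERR_EINPROGRESS    EINPROGRESS
#  define OSERR_ECONNRESET     ECONNRESET
#endif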
Example 2: lis_on_accept_complete
/*
* Callback on new TCP connection.
*/
static void lis_on_accept_complete(pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
pj_sock_t sock,
pj_status_t status)
{
struct tcp_listener *tcp_lis;
struct accept_op *accept_op = (struct accept_op*) op_key;
tcp_lis = (struct tcp_listener*) pj_ioqueue_get_user_data(key);
PJ_UNUSED_ARG(sock);
do {
/* Report new connection. */
if (status == PJ_SUCCESS) {
char addr[PJ_INET6_ADDRSTRLEN+8];
PJ_LOG(5,(tcp_lis->base.obj_name, "Incoming TCP from %s",
pj_sockaddr_print(&accept_op->src_addr, addr,
sizeof(addr), 3)));
transport_create(accept_op->sock, &tcp_lis->base,
&accept_op->src_addr, accept_op->src_addr_len);
} else if (status != PJ_EPENDING) {
show_err(tcp_lis->base.obj_name, "accept()", status);
}
/* Prepare next accept() */
accept_op->src_addr_len = sizeof(accept_op->src_addr);
status = pj_ioqueue_accept(key, op_key, &accept_op->sock,
NULL,
&accept_op->src_addr,
&accept_op->src_addr_len);
} while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
}
Example 3: PJ_DEF (pj_ioqueue_recv)
/*
* pj_ioqueue_recv()
*
* Start asynchronous recv() from the socket.
*/
PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
void *buffer,
pj_ssize_t *length,
unsigned flags )
{
struct read_operation *read_op;
PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
PJ_CHECK_STACK();
/* Check if key is closing (need to do this first before accessing
* other variables, since they might have been destroyed. See ticket
* #469).
*/
if (IS_CLOSING(key))
return PJ_ECANCELLED;
read_op = (struct read_operation*)op_key;
read_op->op = PJ_IOQUEUE_OP_NONE;
/* Try to see if there's data immediately available.
*/
if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
pj_status_t status;
pj_ssize_t size;
size = *length;
status = pj_sock_recv(key->fd, buffer, &size, flags);
if (status == PJ_SUCCESS) {
/* Yes! Data is available! */
*length = size;
return PJ_SUCCESS;
} else {
/* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
* the error to caller.
*/
if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
return status;
}
}
flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);
/*
* No data is immediately available.
* Must schedule asynchronous operation to the ioqueue.
*/
read_op->op = PJ_IOQUEUE_OP_RECV;
read_op->buf = buffer;
read_op->size = *length;
read_op->flags = flags;
pj_mutex_lock(key->mutex);
pj_list_insert_before(&key->read_list, read_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
pj_mutex_unlock(key->mutex);
return PJ_EPENDING;
}
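For context, a hedged caller-side sketch (the helper name and buffer handling are assumptions; `key` and `op_key` are presumed already registered): the EWOULDBLOCK case is absorbed by pj_ioqueue_recv() itself, so a caller only needs to distinguish immediate data, PJ_EPENDING with a later on_read_complete() callback, and real errors.
#include <pjlib.h>
/* Hypothetical caller of pj_ioqueue_recv(). */
static void start_recv(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key,
                       void *buf, pj_size_t bufsize)
{
    pj_ssize_t len = (pj_ssize_t)bufsize;
    pj_status_t status = pj_ioqueue_recv(key, op_key, buf, &len, 0);
    if (status == PJ_SUCCESS) {
        /* Data was available immediately; `len` bytes are now in `buf`. */
    } else if (status == PJ_EPENDING) {
        /* Operation queued; the key's on_read_complete() will fire later. */
    } else {
        /* Anything else (i.e. not the EWOULDBLOCK case) is a real error. */
    }
}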
Example 4: PJ_DEF (pj_mutex_unlock)
/*
* pj_mutex_unlock()
*/
PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
{
pj_status_t status;
PJ_CHECK_STACK();
PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
#if PJ_DEBUG
pj_assert(mutex->owner == pj_thread_this());
if (--mutex->nesting_level == 0) {
mutex->owner = NULL;
}
#endif
LOG_MUTEX((mutex->obj_name, "Mutex released by thread %s",
pj_thread_this()->obj_name));
#if PJ_WIN32_WINNT >= 0x0400
LeaveCriticalSection(&mutex->crit);
status=PJ_SUCCESS;
#else
status = ReleaseMutex(mutex->hMutex) ? PJ_SUCCESS :
PJ_STATUS_FROM_OS(GetLastError());
#endif
return status;
}
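A short usage sketch, purely for orientation (the demo function is hypothetical and assumes a live pj_pool_t): on the plain-Win32 path above, a ReleaseMutex() failure reaches the caller as a PJ_STATUS_FROM_OS(GetLastError()) value, which then flows through the normal pj_status_t handling.
#include <pjlib.h>
/* Hypothetical demo of the lock/unlock pair; `pool` must be a live pool. */
static pj_status_t mutex_demo(pj_pool_t *pool)
{
    pj_mutex_t *mutex;
    pj_status_t status;
    status = pj_mutex_create_simple(pool, "demo", &mutex);
    if (status != PJ_SUCCESS)
        return status;
    pj_mutex_lock(mutex);
    /* ... critical section ... */
    status = pj_mutex_unlock(mutex);  /* may be PJ_STATUS_FROM_OS(...) */
    pj_mutex_destroy(mutex);
    return status;
}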
Example 5: check_connecting
/*
* Poll for the completion of non-blocking connect().
* If there's a completion, the function return the key of the completed
* socket, and 'result' argument contains the connect() result. If connect()
* succeeded, 'result' will have value zero, otherwise will have the error
* code.
*/
static int check_connecting( pj_ioqueue_t *ioqueue )
{
if (ioqueue->connecting_count) {
int i, count;
struct
{
pj_ioqueue_key_t *key;
pj_status_t status;
} events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL-1];
pj_lock_acquire(ioqueue->lock);
for (count=0; count<PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL-1; ++count) {
DWORD result;
result = WaitForMultipleObjects(ioqueue->connecting_count,
ioqueue->connecting_handles,
FALSE, 0);
if (result >= WAIT_OBJECT_0 &&
result < WAIT_OBJECT_0+ioqueue->connecting_count)
{
WSANETWORKEVENTS net_events;
/* Got completed connect(). */
unsigned pos = result - WAIT_OBJECT_0;
events[count].key = ioqueue->connecting_keys[pos];
/* See whether connect has succeeded. */
WSAEnumNetworkEvents((pj_sock_t)events[count].key->hnd,
ioqueue->connecting_handles[pos],
&net_events);
events[count].status =
PJ_STATUS_FROM_OS(net_events.iErrorCode[FD_CONNECT_BIT]);
/* Erase socket from pending connect. */
erase_connecting_socket(ioqueue, pos);
} else {
/* No more events */
break;
}
}
pj_lock_release(ioqueue->lock);
/* Call callbacks. */
for (i=0; i<count; ++i) {
if (events[i].key->cb.on_connect_complete) {
events[i].key->cb.on_connect_complete(events[i].key,
events[i].status);
}
}
return count;
}
return 0;
}
Example 6: sock_set_net_service_type
static pj_status_t sock_set_net_service_type(pj_sock_t sock, int val)
{
pj_status_t status;
status = pj_sock_setsockopt(sock, pj_SOL_SOCKET(), SO_NET_SERVICE_TYPE,
&val, sizeof(val));
if (status == PJ_STATUS_FROM_OS(OSERR_ENOPROTOOPT))
status = PJ_ENOTSUP;
return status;
}
Example 7: sock_get_net_service_type
static pj_status_t sock_get_net_service_type(pj_sock_t sock, int *p_val)
{
pj_status_t status;
int optlen = sizeof(*p_val);
PJ_ASSERT_RETURN(p_val, PJ_EINVAL);
status = pj_sock_getsockopt(sock, pj_SOL_SOCKET(), SO_NET_SERVICE_TYPE,
p_val, &optlen);
if (status == PJ_STATUS_FROM_OS(OSERR_ENOPROTOOPT))
status = PJ_ENOTSUP;
return status;
}
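A hedged caller sketch for the two helpers above (the wrapper name is made up, and it assumes the static functions are in the same translation unit): because ENOPROTOOPT is remapped to PJ_ENOTSUP, a missing SO_NET_SERVICE_TYPE can be treated as "not available on this platform" rather than as a hard failure.
/* Hypothetical caller; relies on sock_set_net_service_type() defined above. */
static void try_set_net_service_type(pj_sock_t sock, int val)
{
    pj_status_t status = sock_set_net_service_type(sock, val);
    if (status == PJ_ENOTSUP) {
        /* Option not supported here (mapped from ENOPROTOOPT); carry on. */
    } else if (status != PJ_SUCCESS) {
        /* Any other status is a genuine setsockopt() failure. */
    }
}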
Example 8: on_read_complete
/*
* Callback on received packet.
*/
static void on_read_complete(pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
pj_ssize_t bytes_read)
{
struct udp_listener *udp;
struct read_op *read_op = (struct read_op*) op_key;
pj_status_t status;
udp = (struct udp_listener*) pj_ioqueue_get_user_data(key);
do {
pj_pool_t *rpool;
/* Report to server */
if (bytes_read > 0) {
read_op->pkt.len = bytes_read;
pj_gettimeofday(&read_op->pkt.rx_time);
pj_turn_srv_on_rx_pkt(udp->base.server, &read_op->pkt);
}
/* Reset pool */
rpool = read_op->pkt.pool;
pj_pool_reset(rpool);
read_op->pkt.pool = rpool;
read_op->pkt.transport = &udp->tp;
read_op->pkt.src.tp_type = udp->base.tp_type;
/* Read next packet */
bytes_read = sizeof(read_op->pkt.pkt);
read_op->pkt.src_addr_len = sizeof(read_op->pkt.src.clt_addr);
pj_bzero(&read_op->pkt.src.clt_addr, sizeof(read_op->pkt.src.clt_addr));
status = pj_ioqueue_recvfrom(udp->key, op_key,
read_op->pkt.pkt, &bytes_read, 0,
&read_op->pkt.src.clt_addr,
&read_op->pkt.src_addr_len);
if (status != PJ_EPENDING && status != PJ_SUCCESS)
bytes_read = -status;
} while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
}
Example 9: PJ_DEF (pj_ioqueue_connect)
/*
* Initiate overlapped connect() operation (well, it's non-blocking actually,
* since there's no overlapped version of connect()).
*/
PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_key_t *key,
const pj_sockaddr_t *addr,
int addrlen )
{
pj_status_t status;
/* check parameters. All must be specified! */
PJ_ASSERT_RETURN(key && addr && addrlen, PJ_EINVAL);
/* Check if key is closing. */
if (IS_CLOSING(key))
return PJ_ECANCELLED;
/* Check if socket has not been marked for connecting */
if (key->connecting != 0)
return PJ_EPENDING;
status = pj_sock_connect(key->fd, addr, addrlen);
if (status == PJ_SUCCESS) {
/* Connected! */
return PJ_SUCCESS;
} else {
if (status == PJ_STATUS_FROM_OS(PJ_BLOCKING_CONNECT_ERROR_VAL)) {
/* Pending! */
pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous
* check in multithreaded app. See #913
*/
if (IS_CLOSING(key)) {
pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
key->connecting = PJ_TRUE;
ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
ioqueue_add_to_set(key->ioqueue, key, EXCEPTION_EVENT);
pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
} else {
/* Error! */
return status;
}
}
}
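Again a hedged caller-side sketch (names are assumptions; `key` must already be registered with an ioqueue): the blocking-connect error value is absorbed by pj_ioqueue_connect() itself, so the caller only separates an immediate PJ_SUCCESS from PJ_EPENDING, with the result of a pending connect delivered later via on_connect_complete().
#include <pjlib.h>
/* Hypothetical caller of pj_ioqueue_connect(). */
static pj_status_t start_connect(pj_ioqueue_key_t *key,
                                 const pj_sockaddr_in *addr)
{
    pj_status_t status = pj_ioqueue_connect(key, addr, sizeof(*addr));
    if (status == PJ_SUCCESS) {
        /* Connected immediately; no completion callback will follow. */
    } else if (status == PJ_EPENDING) {
        /* The EINPROGRESS/WSAEWOULDBLOCK case; on_connect_complete() is
         * invoked once the socket becomes writable (or fails). */
    }
    return status;
}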
Example 10: ioqueue_on_accept_complete
static void ioqueue_on_accept_complete(pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
pj_sock_t new_sock,
pj_status_t status)
{
pj_activesock_t *asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
struct accept_op *accept_op = (struct accept_op*) op_key;
PJ_UNUSED_ARG(new_sock);
/* Ignore if we've been shutdown */
if (asock->shutdown)
return;
do {
if (status == asock->last_err && status != PJ_SUCCESS) {
asock->err_counter++;
if (asock->err_counter >= PJ_ACTIVESOCK_MAX_CONSECUTIVE_ACCEPT_ERROR) {
PJ_LOG(3, ("", "Received %d consecutive errors: %d for the accept()"
" operation, stopping further ioqueue accepts.",
asock->err_counter, asock->last_err));
if ((status == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) &&
(asock->cb.on_accept_complete2))
{
(*asock->cb.on_accept_complete2)(asock,
accept_op->new_sock,
&accept_op->rem_addr,
accept_op->rem_addr_len,
PJ_ESOCKETSTOP);
}
return;
}
} else {
asock->err_counter = 0;
asock->last_err = status;
}
if (status==PJ_SUCCESS && (asock->cb.on_accept_complete2 ||
asock->cb.on_accept_complete)) {
pj_bool_t ret;
/* Notify callback */
if (asock->cb.on_accept_complete2) {
ret = (*asock->cb.on_accept_complete2)(asock,
accept_op->new_sock,
&accept_op->rem_addr,
accept_op->rem_addr_len,
status);
} else {
ret = (*asock->cb.on_accept_complete)(asock,
accept_op->new_sock,
&accept_op->rem_addr,
accept_op->rem_addr_len);
}
/* If callback returns false, we have been destroyed! */
if (!ret)
return;
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
activesock_create_iphone_os_stream(asock);
#endif
} else if (status==PJ_SUCCESS) {
/* Application doesn't handle the new socket, we need to
* close it to avoid resource leak.
*/
pj_sock_close(accept_op->new_sock);
}
/* Don't start another accept() if we've been shutdown */
if (asock->shutdown)
return;
/* Prepare next accept() */
accept_op->new_sock = PJ_INVALID_SOCKET;
accept_op->rem_addr_len = sizeof(accept_op->rem_addr);
status = pj_ioqueue_accept(asock->key, op_key, &accept_op->new_sock,
NULL, &accept_op->rem_addr,
&accept_op->rem_addr_len);
} while (status != PJ_EPENDING && status != PJ_ECANCELLED);
}
Example 11: PJ_DEF (pj_ioqueue_connect, Win32 ioqueue)
/*
* pj_ioqueue_connect()
*
* Initiate overlapped connect() operation (well, it's non-blocking actually,
* since there's no overlapped version of connect()).
*/
PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_key_t *key,
const pj_sockaddr_t *addr,
int addrlen )
{
HANDLE hEvent;
pj_ioqueue_t *ioqueue;
PJ_CHECK_STACK();
PJ_ASSERT_RETURN(key && addr && addrlen, PJ_EINVAL);
#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Check key is not closing */
if (key->closing)
return PJ_ECANCELLED;
#endif
/* Initiate connect() */
if (connect((pj_sock_t)key->hnd, addr, addrlen) != 0) {
DWORD dwStatus;
dwStatus = WSAGetLastError();
if (dwStatus != WSAEWOULDBLOCK) {
return PJ_RETURN_OS_ERROR(dwStatus);
}
} else {
/* Connect has completed immediately! */
return PJ_SUCCESS;
}
ioqueue = key->ioqueue;
/* Add to the array of connecting socket to be polled */
pj_lock_acquire(ioqueue->lock);
if (ioqueue->connecting_count >= MAXIMUM_WAIT_OBJECTS) {
pj_lock_release(ioqueue->lock);
return PJ_ETOOMANYCONN;
}
/* Get or create event object. */
if (ioqueue->event_count) {
hEvent = ioqueue->event_pool[ioqueue->event_count - 1];
--ioqueue->event_count;
} else {
hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
if (hEvent == NULL) {
DWORD dwStatus = GetLastError();
pj_lock_release(ioqueue->lock);
return PJ_STATUS_FROM_OS(dwStatus);
}
}
/* Mark key as connecting.
* We can't use array index since key can be removed dynamically.
*/
key->connecting = 1;
/* Associate socket events to the event object. */
if (WSAEventSelect((pj_sock_t)key->hnd, hEvent, FD_CONNECT) != 0) {
CloseHandle(hEvent);
pj_lock_release(ioqueue->lock);
return PJ_RETURN_OS_ERROR(WSAGetLastError());
}
/* Add to array. */
ioqueue->connecting_keys[ ioqueue->connecting_count ] = key;
ioqueue->connecting_handles[ ioqueue->connecting_count ] = hEvent;
ioqueue->connecting_count++;
pj_lock_release(ioqueue->lock);
return PJ_EPENDING;
}
Example 12: wmme_dev_thread
//......... part of the code is omitted here .........
if (signalled_dir == PJMEDIA_DIR_PLAYBACK)
{
struct wmme_stream *wmme_strm = &strm->play_strm;
MMRESULT mr = MMSYSERR_NOERROR;
status = PJ_SUCCESS;
/*
* Windows Multimedia has requested us to feed some frames to
* playback buffer.
*/
while (wmme_strm->WaveHdr[wmme_strm->dwBufIdx].dwFlags & WHDR_DONE)
{
void* buffer = wmme_strm->WaveHdr[wmme_strm->dwBufIdx].lpData;
PJ_LOG(5,(THIS_FILE, "Finished writing buffer %d",
wmme_strm->dwBufIdx));
/* Get frame from application. */
status = (*strm->play_cb)(strm->user_data,
wmme_strm->timestamp.u32.lo,
buffer,
bytes_per_frame);
if (status != PJ_SUCCESS)
break;
/* Write to the device. */
mr = waveOutWrite(wmme_strm->hWave.Out,
&(wmme_strm->WaveHdr[wmme_strm->dwBufIdx]),
sizeof(WAVEHDR));
if (mr != MMSYSERR_NOERROR)
{
status = PJ_STATUS_FROM_OS(mr);
break;
}
/* Increment position. */
if (++wmme_strm->dwBufIdx >= wmme_strm->dwMaxBufIdx)
wmme_strm->dwBufIdx = 0;
wmme_strm->timestamp.u64 += strm->samples_per_frame /
strm->channel_count;
}
}
else
{
struct wmme_stream *wmme_strm = &strm->rec_strm;
MMRESULT mr = MMSYSERR_NOERROR;
status = PJ_SUCCESS;
/*
* Windows Multimedia has indicated that it has some frames ready
* in the capture buffer. Get as much frames as possible to
* prevent overflows.
*/
#if 0
{
static DWORD tc = 0;
DWORD now = GetTickCount();
DWORD i = 0;
DWORD bits = 0;
if (tc == 0) tc = now;
for (i = 0; i < wmme_strm->dwMaxBufIdx; ++i)
{
//......... rest of the code is omitted here .........
Example 13: loop_send_msg
/* Handler for sending outgoing message; called by transport manager. */
static pj_status_t loop_send_msg( pjsip_transport *tp,
pjsip_tx_data *tdata,
const pj_sockaddr_t *rem_addr,
int addr_len,
void *token,
void (*cb)(pjsip_transport *transport,
void *token,
pj_ssize_t sent_bytes))
{
struct loop_transport *loop = (struct loop_transport*)tp;
struct recv_list *recv_pkt;
PJ_ASSERT_RETURN(tp && (tp->key.type == PJSIP_TRANSPORT_LOOP ||
tp->key.type == PJSIP_TRANSPORT_LOOP_DGRAM), PJ_EINVAL);
PJ_UNUSED_ARG(rem_addr);
PJ_UNUSED_ARG(addr_len);
/* Need to send failure? */
if (loop->fail_mode) {
if (loop->send_delay == 0) {
return PJ_STATUS_FROM_OS(OSERR_ECONNRESET);
} else {
add_notification(loop, tdata, -PJ_STATUS_FROM_OS(OSERR_ECONNRESET),
token, cb);
return PJ_EPENDING;
}
}
/* Discard any packets? */
if (loop->discard)
return PJ_SUCCESS;
/* Create rdata for the "incoming" packet. */
recv_pkt = create_incoming_packet(loop, tdata);
if (!recv_pkt)
return PJ_ENOMEM;
/* If delay is not configured, deliver this packet now! */
if (loop->recv_delay == 0) {
pj_ssize_t size_eaten;
size_eaten = pjsip_tpmgr_receive_packet( loop->base.tpmgr,
&recv_pkt->rdata);
pj_assert(size_eaten == recv_pkt->rdata.pkt_info.len);
pjsip_endpt_release_pool(loop->base.endpt,
recv_pkt->rdata.tp_info.pool);
} else {
/* Otherwise if delay is configured, add the "packet" to the
* receive list to be processed by worker thread.
*/
pj_lock_acquire(loop->base.lock);
pj_list_push_back(&loop->recv_list, recv_pkt);
pj_lock_release(loop->base.lock);
}
if (loop->send_delay != 0) {
add_notification(loop, tdata, tdata->buf.cur - tdata->buf.start,
token, cb);
return PJ_EPENDING;
} else {
return PJ_SUCCESS;
}
}
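For completeness, a hedged sketch of the kind of completion callback passed as `cb` above (the body is an assumption; the signature comes from loop_send_msg()'s parameter list): when the loop transport simulates a failure asynchronously, the callback receives a negative sent_bytes carrying -PJ_STATUS_FROM_OS(OSERR_ECONNRESET).
#include <pjsip.h>
/* Hypothetical send-completion callback matching the `cb` parameter above. */
static void on_tx_complete(pjsip_transport *transport, void *token,
                           pj_ssize_t sent_bytes)
{
    PJ_UNUSED_ARG(transport);
    PJ_UNUSED_ARG(token);
    if (sent_bytes < 0) {
        /* A negative value is a negated pj_status_t, e.g. the simulated
         * ECONNRESET injected by loop_send_msg(). */
        char errmsg[PJ_ERR_MSG_SIZE];
        pj_strerror((pj_status_t)-sent_bytes, errmsg, sizeof(errmsg));
        PJ_LOG(4, ("loop-demo", "Send failed: %s", errmsg));
    }
}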
Example 14: ioqueue_dispatch_read_event
//......... part of the code is omitted here .........
ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);
bytes_read = read_op->size;
if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
read_op->op = PJ_IOQUEUE_OP_NONE;
rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read,
read_op->flags,
read_op->rmt_addr,
read_op->rmt_addrlen);
} else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
read_op->op = PJ_IOQUEUE_OP_NONE;
rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
read_op->flags);
} else {
pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
read_op->op = PJ_IOQUEUE_OP_NONE;
/*
* User has specified pj_ioqueue_read().
* On Win32, we should do ReadFile(). But because we got
* here because of select() anyway, user must have put a
* socket descriptor on h->fd, which in this case we can
* just call pj_sock_recv() instead of ReadFile().
* On Unix, user may put a file in h->fd, so we'll have
* to call read() here.
* This may not compile on systems which doesn't have
* read(). That's why we only specify PJ_LINUX here so
* that error is easier to catch.
*/
# if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
read_op->flags);
//rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
// &bytes_read, NULL);
# elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
bytes_read = read(h->fd, read_op->buf, bytes_read);
rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
# elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
# else
# error "Implement read() for this platform!"
# endif
}
if (rc != PJ_SUCCESS) {
# if defined(PJ_WIN32) && PJ_WIN32 != 0
/* On Win32, for UDP, WSAECONNRESET on the receive side
* indicates that previous sending has triggered ICMP Port
* Unreachable message.
* But we wouldn't know at this point which one of previous
* key that has triggered the error, since UDP socket can
* be shared!
* So we'll just ignore it!
*/
if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
//PJ_LOG(4,(THIS_FILE,
// "Ignored ICMP port unreach. on key=%p", h));
}
# endif
/* In any case we would report this to caller. */
bytes_read = -rc;
}
/* Unlock; from this point we don't need to hold key's mutex
* (unless concurrency is disabled, which in this case we should
* hold the mutex while calling the callback) */
if (h->allow_concurrent) {
/* concurrency may be changed while we're in the callback, so
* save it to a flag.
*/
has_lock = PJ_FALSE;
pj_mutex_unlock(h->mutex);
} else {
has_lock = PJ_TRUE;
}
/* Call callback. */
if (h->cb.on_read_complete && !IS_CLOSING(h)) {
(*h->cb.on_read_complete)(h,
(pj_ioqueue_op_key_t*)read_op,
bytes_read);
}
if (has_lock) {
pj_mutex_unlock(h->mutex);
}
} else {
/*
* This is normal; execution may fall here when multiple threads
* are signalled for the same event, but only one thread eventually
* able to process the event.
*/
pj_mutex_unlock(h->mutex);
}
}
Example 15: ioqueue_on_read_complete
static void ioqueue_on_read_complete(pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
pj_ssize_t bytes_read)
{
pj_activesock_t *asock;
struct read_op *r = (struct read_op*)op_key;
unsigned loop = 0;
pj_status_t status;
asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
/* Ignore if we've been shutdown */
if (asock->shutdown & SHUT_RX)
return;
do {
unsigned flags;
if (bytes_read > 0) {
/*
* We've got new data.
*/
pj_size_t remainder;
pj_bool_t ret;
/* Append this new data to existing data. If socket is stream
* oriented, user might have left some data in the buffer.
* Otherwise if socket is datagram there will be nothing in
* existing packet hence the packet will contain only the new
* packet.
*/
r->size += bytes_read;
/* Set default remainder to zero */
remainder = 0;
/* And return value to TRUE */
ret = PJ_TRUE;
/* Notify callback */
if (asock->read_type == TYPE_RECV && asock->cb.on_data_read) {
ret = (*asock->cb.on_data_read)(asock, r->pkt, r->size,
PJ_SUCCESS, &remainder);
} else if (asock->read_type == TYPE_RECV_FROM &&
asock->cb.on_data_recvfrom)
{
ret = (*asock->cb.on_data_recvfrom)(asock, r->pkt, r->size,
&r->src_addr,
r->src_addr_len,
PJ_SUCCESS);
}
/* If callback returns false, we have been destroyed! */
if (!ret)
return;
/* Only stream oriented socket may leave data in the packet */
if (asock->stream_oriented) {
r->size = remainder;
} else {
r->size = 0;
}
} else if (bytes_read <= 0 &&
-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
-bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
(asock->stream_oriented ||
-bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET)))
{
pj_size_t remainder;
pj_bool_t ret;
if (bytes_read == 0) {
/* For stream/connection oriented socket, this means the
* connection has been closed. For datagram sockets, it means
* we've received datagram with zero length.
*/
if (asock->stream_oriented)
status = PJ_EEOF;
else
status = PJ_SUCCESS;
} else {
/* This means we've got an error. If this is stream/connection
* oriented, it means connection has been closed. For datagram
* sockets, it means we've got some error (e.g. EWOULDBLOCK).
*/
status = (pj_status_t)-bytes_read;
}
/* Set default remainder to zero */
remainder = 0;
/* And return value to TRUE */
ret = PJ_TRUE;
/* Notify callback */
if (asock->read_type == TYPE_RECV && asock->cb.on_data_read) {
/* For connection oriented socket, we still need to report
* the remainder data (if any) to the user to let user do
* processing with the remainder data before it closes the
//......... part of the code is omitted here .........