This page collects typical usage examples of the C++ function pj_lock_create_recursive_mutex. If you have been asking yourself what exactly pj_lock_create_recursive_mutex does, how to call it, or what real-world uses look like, the curated code samples here should help.
Fifteen code examples of pj_lock_create_recursive_mutex are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
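To set the stage, here is a minimal, self-contained sketch of the function's basic lifecycle (written for this page, not taken from any of the examples below): the lock is allocated from a pjlib memory pool, may be re-acquired by the thread that already holds it, and must be released once for every acquire before it is destroyed.

#include <pjlib.h>

static void recursive_mutex_demo(void)
{
    pj_caching_pool cp;
    pj_pool_t *pool;
    pj_lock_t *lock;
    pj_status_t status;

    status = pj_init();
    pj_assert(status == PJ_SUCCESS);

    /* NULL selects the default pool policy. */
    pj_caching_pool_init(&cp, NULL, 0);
    pool = pj_pool_create(&cp.factory, "demo", 512, 512, NULL);

    status = pj_lock_create_recursive_mutex(pool, "demo", &lock);
    pj_assert(status == PJ_SUCCESS);

    /* A recursive mutex may be acquired again by the owning thread
     * without deadlocking; each acquire needs a matching release. */
    pj_lock_acquire(lock);
    pj_lock_acquire(lock);
    pj_lock_release(lock);
    pj_lock_release(lock);

    pj_lock_destroy(lock);
    pj_pool_release(pool);
    pj_caching_pool_destroy(&cp);
    pj_shutdown();
}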
Example 1: create_stun_config
pj_status_t create_stun_config(pj_pool_t *pool, pj_stun_config *stun_cfg)
{
pj_ioqueue_t *ioqueue;
pj_timer_heap_t *timer_heap;
pj_lock_t *lock;
pj_status_t status;
status = pj_ioqueue_create(pool, 64, &ioqueue);
if (status != PJ_SUCCESS) {
app_perror(" pj_ioqueue_create()", status);
return status;
}
status = pj_timer_heap_create(pool, 256, &timer_heap);
if (status != PJ_SUCCESS) {
app_perror(" pj_timer_heap_create()", status);
pj_ioqueue_destroy(ioqueue);
return status;
}
status = pj_lock_create_recursive_mutex(pool, NULL, &lock);
if (status != PJ_SUCCESS) {
app_perror(" pj_lock_create_recursive_mutex()", status);
pj_timer_heap_destroy(timer_heap);
pj_ioqueue_destroy(ioqueue);
return status;
}
pj_timer_heap_set_lock(timer_heap, lock, PJ_TRUE);
pj_stun_config_init(stun_cfg, mem, 0, ioqueue, timer_heap); /* 'mem' is the test app's global pool factory */
return PJ_SUCCESS;
}
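Note that this example passes PJ_TRUE as the third argument to pj_timer_heap_set_lock(), which tells the timer heap to delete the lock when the heap itself is destroyed; this is also why the snippet does not keep the lock pointer around. A matching teardown might look like the following sketch (the helper name destroy_stun_config is assumed here, not shown in the source):

void destroy_stun_config(pj_stun_config *stun_cfg)
{
    if (stun_cfg->timer_heap) {
        /* Destroying the heap also destroys the lock, because the
         * auto-delete flag was set in create_stun_config(). */
        pj_timer_heap_destroy(stun_cfg->timer_heap);
        stun_cfg->timer_heap = NULL;
    }
    if (stun_cfg->ioqueue) {
        pj_ioqueue_destroy(stun_cfg->ioqueue);
        stun_cfg->ioqueue = NULL;
    }
}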
Example 2: PJ_DEF
PJ_DEF(pj_status_t) pjmedia_delay_buf_create( pj_pool_t *pool,
const char *name,
unsigned clock_rate,
unsigned samples_per_frame,
unsigned channel_count,
unsigned max_delay,
unsigned options,
pjmedia_delay_buf **p_b)
{
pjmedia_delay_buf *b;
pj_status_t status;
PJ_ASSERT_RETURN(pool && samples_per_frame && clock_rate && channel_count &&
p_b, PJ_EINVAL);
PJ_ASSERT_RETURN(options==0, PJ_EINVAL);
PJ_UNUSED_ARG(options);
if (!name) {
name = "delaybuf";
}
b = PJ_POOL_ZALLOC_T(pool, pjmedia_delay_buf);
pj_ansi_strncpy(b->obj_name, name, PJ_MAX_OBJ_NAME-1);
b->samples_per_frame = samples_per_frame;
b->channel_count = channel_count;
b->ptime = samples_per_frame * 1000 / clock_rate / channel_count;
if (max_delay < b->ptime)
max_delay = PJ_MAX(DEFAULT_MAX_DELAY, b->ptime);
b->max_cnt = samples_per_frame * max_delay / b->ptime;
b->eff_cnt = b->max_cnt >> 1;
b->recalc_timer = RECALC_TIME;
/* Create circular buffer */
status = pjmedia_circ_buf_create(pool, b->max_cnt, &b->circ_buf);
if (status != PJ_SUCCESS)
return status;
/* Create WSOLA */
status = pjmedia_wsola_create(pool, clock_rate, samples_per_frame, 1,
PJMEDIA_WSOLA_NO_FADING, &b->wsola);
if (status != PJ_SUCCESS)
return status;
/* Finally, create mutex */
status = pj_lock_create_recursive_mutex(pool, b->obj_name,
&b->lock);
if (status != PJ_SUCCESS)
return status;
*p_b = b;
TRACE__((b->obj_name,"Delay buffer created"));
return PJ_SUCCESS;
}
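As a worked example of the sizing arithmetic above (illustrative figures, not from the source): with clock_rate = 16000, samples_per_frame = 320 and channel_count = 1, the frame time is ptime = 320 * 1000 / 16000 / 1 = 20 ms; a max_delay of 400 ms then yields max_cnt = 320 * 400 / 20 = 6400 samples of circular-buffer capacity, and eff_cnt starts at half of that.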
Example 3: PJ_DEF
PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
const char *name,
const pj_stun_session_cb *cb,
pj_bool_t fingerprint,
pj_stun_session **p_sess)
{
pj_pool_t *pool;
pj_stun_session *sess;
pj_status_t status;
PJ_ASSERT_RETURN(cfg && cb && p_sess, PJ_EINVAL);
if (name==NULL)
name = "stuse%p";
pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS,
PJNATH_POOL_INC_STUN_SESS, NULL);
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
sess = PJ_POOL_ZALLOC_T(pool, pj_stun_session);
sess->cfg = cfg;
sess->pool = pool;
pj_memcpy(&sess->cb, cb, sizeof(*cb));
sess->use_fingerprint = fingerprint;
sess->log_flag = 0xFFFF;
sess->srv_name.ptr = (char*) pj_pool_alloc(pool, 32);
sess->srv_name.slen = pj_ansi_snprintf(sess->srv_name.ptr, 32,
"pjnath-%s", pj_get_version());
sess->rx_pool = pj_pool_create(sess->cfg->pf, name,
PJNATH_POOL_LEN_STUN_TDATA,
PJNATH_POOL_INC_STUN_TDATA, NULL);
pj_list_init(&sess->pending_request_list);
pj_list_init(&sess->cached_response_list);
status = pj_lock_create_recursive_mutex(pool, name, &sess->lock);
if (status != PJ_SUCCESS) {
pj_pool_release(pool);
return status;
}
sess->delete_lock = PJ_TRUE;
status = pj_atomic_create(pool, 0, &sess->busy);
if (status != PJ_SUCCESS) {
pj_lock_destroy(sess->lock);
pj_pool_release(pool);
return status;
}
*p_sess = sess;
return PJ_SUCCESS;
}
Example 4: PJ_DEF
PJ_DEF(pj_status_t) pjsip_regc_create( pjsip_endpoint *endpt, void *token,
pjsip_regc_cb *cb,
pjsip_regc **p_regc)
{
pj_pool_t *pool;
pjsip_regc *regc;
pj_status_t status;
/* Verify arguments. */
PJ_ASSERT_RETURN(endpt && cb && p_regc, PJ_EINVAL);
pool = pjsip_endpt_create_pool(endpt, "regc%p", 1024, 1024);
PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
regc = PJ_POOL_ZALLOC_T(pool, pjsip_regc);
regc->pool = pool;
regc->endpt = endpt;
regc->token = token;
regc->cb = cb;
regc->expires = PJSIP_REGC_EXPIRATION_NOT_SPECIFIED;
regc->add_xuid_param = pjsip_cfg()->regc.add_xuid_param;
status = pj_lock_create_recursive_mutex(pool, pool->obj_name,
&regc->lock);
if (status != PJ_SUCCESS) {
pj_pool_release(pool);
return status;
}
status = pj_atomic_create(pool, 0, &regc->busy_ctr);
if (status != PJ_SUCCESS) {
pj_lock_destroy(regc->lock);
pj_pool_release(pool);
return status;
}
status = pjsip_auth_clt_init(&regc->auth_sess, endpt, regc->pool, 0);
if (status != PJ_SUCCESS) {
pj_atomic_destroy(regc->busy_ctr);
pj_lock_destroy(regc->lock);
pj_pool_release(pool);
return status;
}
pj_list_init(&regc->route_set);
pj_list_init(&regc->hdr_list);
pj_list_init(&regc->contact_hdr_list);
pj_list_init(&regc->removed_contact_hdr_list);
/* Done */
*p_regc = regc;
return PJ_SUCCESS;
}
Example 5: tcp_create
/*
* Common function to create TCP transport, called when pending accept() and
* pending connect() complete.
*/
static pj_status_t tcp_create( struct tcp_listener *listener,
pj_pool_t *pool,
pj_sock_t sock, pj_bool_t is_server,
const pj_sockaddr *local,
const pj_sockaddr *remote,
struct tcp_transport **p_tcp)
{
struct tcp_transport *tcp;
pj_ioqueue_t *ioqueue;
pj_activesock_cfg asock_cfg;
pj_activesock_cb tcp_callback;
const pj_str_t ka_pkt = PJSIP_TCP_KEEP_ALIVE_DATA;
char print_addr[PJ_INET6_ADDRSTRLEN+10];
pj_status_t status;
PJ_ASSERT_RETURN(sock != PJ_INVALID_SOCKET, PJ_EINVAL);
if (pool == NULL) {
pool = pjsip_endpt_create_pool(listener->endpt, "tcp",
POOL_TP_INIT, POOL_TP_INC);
PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
}
/*
* Create and initialize basic transport structure.
*/
tcp = PJ_POOL_ZALLOC_T(pool, struct tcp_transport);
tcp->is_server = is_server;
tcp->sock = sock;
/*tcp->listener = listener;*/
pj_list_init(&tcp->delayed_list);
tcp->base.pool = pool;
pj_ansi_snprintf(tcp->base.obj_name, PJ_MAX_OBJ_NAME,
(is_server ? "tcps%p" :"tcpc%p"), tcp);
status = pj_atomic_create(pool, 0, &tcp->base.ref_cnt);
if (status != PJ_SUCCESS) {
goto on_error;
}
status = pj_lock_create_recursive_mutex(pool, "tcp", &tcp->base.lock);
if (status != PJ_SUCCESS) {
goto on_error;
}
tcp->base.key.type = listener->factory.type;
pj_sockaddr_cp(&tcp->base.key.rem_addr, remote);
tcp->base.type_name = (char*)pjsip_transport_get_type_name(
(pjsip_transport_type_e)tcp->base.key.type);
tcp->base.flag = pjsip_transport_get_flag_from_type(
(pjsip_transport_type_e)tcp->base.key.type);
tcp->base.info = (char*) pj_pool_alloc(pool, 64);
pj_ansi_snprintf(tcp->base.info, 64, "%s to %s",
tcp->base.type_name,
pj_sockaddr_print(remote, print_addr,
sizeof(print_addr), 3));
tcp->base.addr_len = pj_sockaddr_get_len(remote);
pj_sockaddr_cp(&tcp->base.local_addr, local);
sockaddr_to_host_port(pool, &tcp->base.local_name, local);
sockaddr_to_host_port(pool, &tcp->base.remote_name, remote);
tcp->base.dir = is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;
tcp->base.endpt = listener->endpt;
tcp->base.tpmgr = listener->tpmgr;
tcp->base.send_msg = &tcp_send_msg;
tcp->base.do_shutdown = &tcp_shutdown;
tcp->base.destroy = &tcp_destroy_transport;
/* Create active socket */
pj_activesock_cfg_default(&asock_cfg);
asock_cfg.async_cnt = 1;
pj_bzero(&tcp_callback, sizeof(tcp_callback));
tcp_callback.on_data_read = &on_data_read;
tcp_callback.on_data_sent = &on_data_sent;
tcp_callback.on_connect_complete = &on_connect_complete;
ioqueue = pjsip_endpt_get_ioqueue(listener->endpt);
status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
ioqueue, &tcp_callback, tcp, &tcp->asock);
if (status != PJ_SUCCESS) {
goto on_error;
}
/* Register transport to transport manager */
status = pjsip_transport_register(listener->tpmgr, &tcp->base);
if (status != PJ_SUCCESS) {
goto on_error;
}
tcp->is_registered = PJ_TRUE;
//......... (rest of the code omitted) .........
Example 6: PJ_DEF
/*
* This is the public API to create, initialize, register, and start the
* TCP listener.
*/
PJ_DEF(pj_status_t) pjsip_tcp_transport_start3(
pjsip_endpoint *endpt,
const pjsip_tcp_transport_cfg *cfg,
pjsip_tpfactory **p_factory
)
{
pj_pool_t *pool;
pj_sock_t sock = PJ_INVALID_SOCKET;
struct tcp_listener *listener;
pj_activesock_cfg asock_cfg;
pj_activesock_cb listener_cb;
pj_sockaddr *listener_addr;
int addr_len;
pj_status_t status;
/* Sanity check */
PJ_ASSERT_RETURN(endpt && cfg->async_cnt, PJ_EINVAL);
/* Verify that address given in a_name (if any) is valid */
if (cfg->addr_name.host.slen) {
pj_sockaddr tmp;
status = pj_sockaddr_init(cfg->af, &tmp, &cfg->addr_name.host,
(pj_uint16_t)cfg->addr_name.port);
if (status != PJ_SUCCESS || !pj_sockaddr_has_addr(&tmp) ||
(cfg->af==pj_AF_INET() &&
tmp.ipv4.sin_addr.s_addr==PJ_INADDR_NONE))
{
/* Invalid address */
return PJ_EINVAL;
}
}
pool = pjsip_endpt_create_pool(endpt, "tcplis", POOL_LIS_INIT,
POOL_LIS_INC);
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
listener = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
listener->factory.pool = pool;
listener->factory.type = cfg->af==pj_AF_INET() ? PJSIP_TRANSPORT_TCP :
PJSIP_TRANSPORT_TCP6;
listener->factory.type_name = (char*)
pjsip_transport_get_type_name(listener->factory.type);
listener->factory.flag =
pjsip_transport_get_flag_from_type(listener->factory.type);
listener->qos_type = cfg->qos_type;
pj_memcpy(&listener->qos_params, &cfg->qos_params,
sizeof(cfg->qos_params));
pj_ansi_strcpy(listener->factory.obj_name, "tcplis");
if (listener->factory.type==PJSIP_TRANSPORT_TCP6)
pj_ansi_strcat(listener->factory.obj_name, "6");
status = pj_lock_create_recursive_mutex(pool, listener->factory.obj_name,
&listener->factory.lock);
if (status != PJ_SUCCESS)
goto on_error;
/* Create socket */
status = pj_sock_socket(cfg->af, pj_SOCK_STREAM(), 0, &sock);
if (status != PJ_SUCCESS)
goto on_error;
/* Apply QoS, if specified */
status = pj_sock_apply_qos2(sock, cfg->qos_type, &cfg->qos_params,
2, listener->factory.obj_name,
"SIP TCP listener socket");
/* Bind address may be different than factory.local_addr because
* factory.local_addr will be resolved below.
*/
pj_sockaddr_cp(&listener->bound_addr, &cfg->bind_addr);
/* Bind socket */
listener_addr = &listener->factory.local_addr;
pj_sockaddr_cp(listener_addr, &cfg->bind_addr);
status = pj_sock_bind(sock, listener_addr,
pj_sockaddr_get_len(listener_addr));
if (status != PJ_SUCCESS)
goto on_error;
/* Retrieve the bound address */
addr_len = pj_sockaddr_get_len(listener_addr);
status = pj_sock_getsockname(sock, listener_addr, &addr_len);
if (status != PJ_SUCCESS)
goto on_error;
/* If published host/IP is specified, then use that address as the
* listener advertised address.
*/
if (cfg->addr_name.host.slen) {
/* Copy the address */
listener->factory.addr_name = cfg->addr_name;
//......... (rest of the code omitted) .........
Example 7: PJ_DEF
/*
* pj_ioqueue_create()
*
* Create select ioqueue.
*/
PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
pj_size_t max_fd,
pj_ioqueue_t **p_ioqueue)
{
pj_ioqueue_t *ioqueue;
pj_status_t rc;
pj_lock_t *lock;
int i;
/* Check that arguments are valid. */
PJ_ASSERT_RETURN(pool != NULL && p_ioqueue != NULL &&
max_fd > 0, PJ_EINVAL);
/* Check that size of pj_ioqueue_op_key_t is sufficient */
PJ_ASSERT_RETURN(sizeof(pj_ioqueue_op_key_t)-sizeof(void*) >=
sizeof(union operation_key), PJ_EBUG);
ioqueue = pj_pool_alloc(pool, sizeof(pj_ioqueue_t));
ioqueue_init(ioqueue);
ioqueue->max = max_fd;
ioqueue->count = 0;
pj_list_init(&ioqueue->active_list);
#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* When safe unregistration is used (the default), we pre-create
* all keys and put them in the free list.
*/
/* Mutex to protect key's reference counter
* We don't want to use key's mutex or ioqueue's mutex because
* that would create deadlock situation in some cases.
*/
rc = pj_mutex_create_simple(pool, NULL, &ioqueue->ref_cnt_mutex);
if (rc != PJ_SUCCESS)
return rc;
/* Init key list */
pj_list_init(&ioqueue->free_list);
pj_list_init(&ioqueue->closing_list);
/* Pre-create all keys according to max_fd */
for ( i=0; i<max_fd; ++i) {
pj_ioqueue_key_t *key;
key = PJ_POOL_ALLOC_T(pool, pj_ioqueue_key_t);
key->ref_count = 0;
rc = pj_lock_create_recursive_mutex(pool, NULL, &key->lock);
if (rc != PJ_SUCCESS) {
key = ioqueue->free_list.next;
while (key != &ioqueue->free_list) {
pj_lock_destroy(key->lock);
key = key->next;
}
pj_mutex_destroy(ioqueue->ref_cnt_mutex);
return rc;
}
pj_list_push_back(&ioqueue->free_list, key);
}
#endif
rc = pj_lock_create_simple_mutex(pool, "ioq%p", &lock);
if (rc != PJ_SUCCESS)
return rc;
rc = pj_ioqueue_set_lock(ioqueue, lock, PJ_TRUE);
if (rc != PJ_SUCCESS)
return rc;
ioqueue->epfd = os_epoll_create(max_fd);
if (ioqueue->epfd < 0) {
ioqueue_destroy(ioqueue);
return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
}
/*ioqueue->events = pj_pool_calloc(pool, max_fd, sizeof(struct epoll_event));
PJ_ASSERT_RETURN(ioqueue->events != NULL, PJ_ENOMEM);
ioqueue->queue = pj_pool_calloc(pool, max_fd, sizeof(struct queue));
PJ_ASSERT_RETURN(ioqueue->queue != NULL, PJ_ENOMEM);
*/
PJ_LOG(4, ("pjlib", "epoll I/O Queue created (%p)", ioqueue));
*p_ioqueue = ioqueue;
return PJ_SUCCESS;
}
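Once the queue exists, it is typically driven by one or more worker threads calling pj_ioqueue_poll() in a loop; a minimal sketch using the standard pjlib API (quit_flag is a placeholder for the application's own shutdown condition):

pj_time_val timeout = { 0, 100 };   /* poll in 100 ms slices */
while (!quit_flag)
    pj_ioqueue_poll(ioqueue, &timeout);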
Example 8: PJ_DEF
/*
* Create server.
*/
PJ_DEF(pj_status_t) pj_turn_srv_create(pj_pool_factory *pf,
pj_turn_srv **p_srv)
{
pj_pool_t *pool;
pj_stun_session_cb sess_cb;
pj_turn_srv *srv;
unsigned i;
pj_status_t status;
PJ_ASSERT_RETURN(pf && p_srv, PJ_EINVAL);
/* Create server and init core settings */
pool = pj_pool_create(pf, "srv%p", 1000, 1000, NULL);
srv = PJ_POOL_ZALLOC_T(pool, pj_turn_srv);
srv->obj_name = pool->obj_name;
srv->core.pf = pf;
srv->core.pool = pool;
srv->core.tls_key = srv->core.tls_data = -1;
/* Create ioqueue */
status = pj_ioqueue_create(pool, MAX_HANDLES, &srv->core.ioqueue);
if (status != PJ_SUCCESS)
goto on_error;
/* Server mutex */
status = pj_lock_create_recursive_mutex(pool, srv->obj_name,
&srv->core.lock);
if (status != PJ_SUCCESS)
goto on_error;
/* Allocate TLS */
status = pj_thread_local_alloc(&srv->core.tls_key);
if (status != PJ_SUCCESS)
goto on_error;
status = pj_thread_local_alloc(&srv->core.tls_data);
if (status != PJ_SUCCESS)
goto on_error;
/* Create timer heap */
status = pj_timer_heap_create(pool, MAX_TIMER, &srv->core.timer_heap);
if (status != PJ_SUCCESS)
goto on_error;
/* Configure lock for the timer heap */
pj_timer_heap_set_lock(srv->core.timer_heap, srv->core.lock, PJ_FALSE);
/* Array of listeners */
srv->core.listener = (pj_turn_listener**)
pj_pool_calloc(pool, MAX_LISTENERS,
sizeof(srv->core.listener[0]));
/* Create hash tables */
srv->tables.alloc = pj_hash_create(pool, MAX_CLIENTS);
srv->tables.res = pj_hash_create(pool, MAX_CLIENTS);
/* Init ports settings */
srv->ports.min_udp = srv->ports.next_udp = MIN_PORT;
srv->ports.max_udp = MAX_PORT;
srv->ports.min_tcp = srv->ports.next_tcp = MIN_PORT;
srv->ports.max_tcp = MAX_PORT;
/* Init STUN config */
pj_stun_config_init(&srv->core.stun_cfg, pf, 0, srv->core.ioqueue,
srv->core.timer_heap);
/* Init STUN credential */
srv->core.cred.type = PJ_STUN_AUTH_CRED_DYNAMIC;
srv->core.cred.data.dyn_cred.user_data = srv;
srv->core.cred.data.dyn_cred.get_auth = &pj_turn_get_auth;
srv->core.cred.data.dyn_cred.get_password = &pj_turn_get_password;
srv->core.cred.data.dyn_cred.verify_nonce = &pj_turn_verify_nonce;
/* Create STUN session to handle new allocation */
pj_bzero(&sess_cb, sizeof(sess_cb));
sess_cb.on_rx_request = &on_rx_stun_request;
sess_cb.on_send_msg = &on_tx_stun_msg;
status = pj_stun_session_create(&srv->core.stun_cfg, srv->obj_name,
&sess_cb, PJ_FALSE, NULL,
&srv->core.stun_sess);
if (status != PJ_SUCCESS) {
goto on_error;
}
pj_stun_session_set_user_data(srv->core.stun_sess, srv);
pj_stun_session_set_credential(srv->core.stun_sess, PJ_STUN_AUTH_LONG_TERM,
&srv->core.cred);
/* Array of worker threads */
srv->core.thread_cnt = MAX_THREADS;
srv->core.thread = (pj_thread_t**)
pj_pool_calloc(pool, srv->core.thread_cnt,
sizeof(pj_thread_t*));
/* Start the worker threads */
//......... (rest of the code omitted) .........
Example 9: PJ_DEF
/*
* This is the public API to create, initialize, register, and start the
* TLS listener.
*/
PJ_DEF(pj_status_t) pjsip_tls_transport_start (pjsip_endpoint *endpt,
const pjsip_tls_setting *opt,
const pj_sockaddr_in *local,
const pjsip_host_port *a_name,
unsigned async_cnt,
pjsip_tpfactory **p_factory)
{
pj_pool_t *pool;
struct tls_listener *listener;
pj_ssl_sock_param ssock_param;
pj_sockaddr_in *listener_addr;
pj_bool_t has_listener;
pj_status_t status;
/* Sanity check */
PJ_ASSERT_RETURN(endpt && async_cnt, PJ_EINVAL);
/* Verify that address given in a_name (if any) is valid */
if (a_name && a_name->host.slen) {
pj_sockaddr_in tmp;
status = pj_sockaddr_in_init(&tmp, &a_name->host,
(pj_uint16_t)a_name->port);
if (status != PJ_SUCCESS || tmp.sin_addr.s_addr == PJ_INADDR_ANY ||
tmp.sin_addr.s_addr == PJ_INADDR_NONE)
{
/* Invalid address */
return PJ_EINVAL;
}
}
pool = pjsip_endpt_create_pool(endpt, "tlslis", POOL_LIS_INIT,
POOL_LIS_INC);
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
listener = PJ_POOL_ZALLOC_T(pool, struct tls_listener);
listener->factory.pool = pool;
listener->factory.type = PJSIP_TRANSPORT_TLS;
listener->factory.type_name = "tls";
listener->factory.flag =
pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TLS);
pj_ansi_strcpy(listener->factory.obj_name, "tlslis");
if (opt)
pjsip_tls_setting_copy(pool, &listener->tls_setting, opt);
else
pjsip_tls_setting_default(&listener->tls_setting);
status = pj_lock_create_recursive_mutex(pool, "tlslis",
&listener->factory.lock);
if (status != PJ_SUCCESS)
goto on_error;
if (async_cnt > MAX_ASYNC_CNT)
async_cnt = MAX_ASYNC_CNT;
/* Build SSL socket param */
pj_ssl_sock_param_default(&ssock_param);
ssock_param.cb.on_accept_complete = &on_accept_complete;
ssock_param.cb.on_data_read = &on_data_read;
ssock_param.cb.on_data_sent = &on_data_sent;
ssock_param.async_cnt = async_cnt;
ssock_param.ioqueue = pjsip_endpt_get_ioqueue(endpt);
ssock_param.require_client_cert = listener->tls_setting.require_client_cert;
ssock_param.timeout = listener->tls_setting.timeout;
ssock_param.user_data = listener;
ssock_param.verify_peer = PJ_FALSE; /* avoid SSL socket closing the socket
* due to verification error */
if (ssock_param.send_buffer_size < PJSIP_MAX_PKT_LEN)
ssock_param.send_buffer_size = PJSIP_MAX_PKT_LEN;
if (ssock_param.read_buffer_size < PJSIP_MAX_PKT_LEN)
ssock_param.read_buffer_size = PJSIP_MAX_PKT_LEN;
ssock_param.ciphers_num = listener->tls_setting.ciphers_num;
ssock_param.ciphers = listener->tls_setting.ciphers;
ssock_param.qos_type = listener->tls_setting.qos_type;
ssock_param.qos_ignore_error = listener->tls_setting.qos_ignore_error;
pj_memcpy(&ssock_param.qos_params, &listener->tls_setting.qos_params,
sizeof(ssock_param.qos_params));
has_listener = PJ_FALSE;
switch(listener->tls_setting.method) {
case PJSIP_TLSV1_METHOD:
ssock_param.proto = PJ_SSL_SOCK_PROTO_TLS1;
break;
case PJSIP_SSLV2_METHOD:
ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL2;
break;
case PJSIP_SSLV3_METHOD:
ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL3;
break;
case PJSIP_SSLV23_METHOD:
ssock_param.proto = PJ_SSL_SOCK_PROTO_SSL23;
break;
default:
//......... (rest of the code omitted) .........
Example 10: PJ_DEF
/*
* Create.
*/
PJ_DEF(pj_status_t) pj_turn_sock_create(pj_stun_config *cfg,
int af,
pj_turn_tp_type conn_type,
const pj_turn_sock_cb *cb,
const pj_turn_sock_cfg *setting,
void *user_data,
pj_turn_sock **p_turn_sock)
{
pj_turn_sock *turn_sock;
pj_turn_session_cb sess_cb;
pj_turn_sock_cfg default_setting;
pj_pool_t *pool;
const char *name_tmpl;
pj_status_t status;
PJ_ASSERT_RETURN(cfg && p_turn_sock, PJ_EINVAL);
PJ_ASSERT_RETURN(af==pj_AF_INET() || af==pj_AF_INET6(), PJ_EINVAL);
PJ_ASSERT_RETURN(conn_type!=PJ_TURN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);
if (!setting) {
pj_turn_sock_cfg_default(&default_setting);
setting = &default_setting;
}
switch (conn_type) {
case PJ_TURN_TP_UDP:
name_tmpl = "udprel%p";
break;
case PJ_TURN_TP_TCP:
name_tmpl = "tcprel%p";
break;
default:
PJ_ASSERT_RETURN(!"Invalid TURN conn_type", PJ_EINVAL);
name_tmpl = "tcprel%p";
break;
}
/* Create and init basic data structure */
pool = pj_pool_create(cfg->pf, name_tmpl, PJNATH_POOL_LEN_TURN_SOCK,
PJNATH_POOL_INC_TURN_SOCK, NULL);
turn_sock = PJ_POOL_ZALLOC_T(pool, pj_turn_sock);
turn_sock->pool = pool;
turn_sock->obj_name = pool->obj_name;
turn_sock->user_data = user_data;
turn_sock->af = af;
turn_sock->conn_type = conn_type;
/* Copy STUN config (this contains ioqueue, timer heap, etc.) */
pj_memcpy(&turn_sock->cfg, cfg, sizeof(*cfg));
/* Copy setting (QoS parameters etc */
pj_memcpy(&turn_sock->setting, setting, sizeof(*setting));
/* Set callback */
if (cb) {
pj_memcpy(&turn_sock->cb, cb, sizeof(*cb));
}
/* Create lock */
status = pj_lock_create_recursive_mutex(pool, turn_sock->obj_name,
&turn_sock->lock);
if (status != PJ_SUCCESS) {
PJ_LOG(1, ("turn_session.c", "!!! TURN DEALLOCATE !!! in pj_turn_sock_create() destroy turn_sock for creating mutex failed."));
destroy(turn_sock);
return status;
}
/* Init timer */
pj_timer_entry_init(&turn_sock->timer, TIMER_NONE, turn_sock, &timer_cb);
/* Init TURN session */
pj_bzero(&sess_cb, sizeof(sess_cb));
sess_cb.on_send_pkt = &turn_on_send_pkt;
sess_cb.on_channel_bound = &turn_on_channel_bound;
sess_cb.on_rx_data = &turn_on_rx_data;
sess_cb.on_state = &turn_on_state;
// DEAN Added 2013-03-19
sess_cb.on_turn_srv_allocated = &turn_on_allocated;
status = pj_turn_session_create(cfg, pool->obj_name, af, conn_type,
&sess_cb, 0, turn_sock, &turn_sock->sess);
if (status != PJ_SUCCESS) {
PJ_LOG(1, ("turn_session.c", "!!! TURN DEALLOCATE !!! in pj_turn_sock_create() destroy turn_sock for creating turn session failed."));
destroy(turn_sock);
return status;
}
/* Note: socket and ioqueue will be created later once the TURN server
* has been resolved.
*/
*p_turn_sock = turn_sock;
return PJ_SUCCESS;
}
Example 11: transport_create
/*!
* \brief Create a pjsip transport.
*/
static int transport_create(void *data)
{
struct transport_create_data *create_data = data;
struct ws_transport *newtransport = NULL;
pjsip_endpoint *endpt = ast_sip_get_pjsip_endpoint();
struct pjsip_tpmgr *tpmgr = pjsip_endpt_get_tpmgr(endpt);
pj_pool_t *pool;
pj_str_t buf;
pj_status_t status;
newtransport = ao2_t_alloc_options(sizeof(*newtransport), transport_dtor,
AO2_ALLOC_OPT_LOCK_NOLOCK, "pjsip websocket transport");
if (!newtransport) {
ast_log(LOG_ERROR, "Failed to allocate WebSocket transport.\n");
goto on_error;
}
newtransport->transport.endpt = endpt;
if (!(pool = pjsip_endpt_create_pool(endpt, "ws", 512, 512))) {
ast_log(LOG_ERROR, "Failed to allocate WebSocket endpoint pool.\n");
goto on_error;
}
newtransport->transport.pool = pool;
newtransport->ws_session = create_data->ws_session;
/* Keep the session until transport dies */
ast_websocket_ref(newtransport->ws_session);
status = pj_atomic_create(pool, 0, &newtransport->transport.ref_cnt);
if (status != PJ_SUCCESS) {
goto on_error;
}
status = pj_lock_create_recursive_mutex(pool, pool->obj_name, &newtransport->transport.lock);
if (status != PJ_SUCCESS) {
goto on_error;
}
pj_sockaddr_parse(pj_AF_UNSPEC(), 0, pj_cstr(&buf, ast_sockaddr_stringify(ast_websocket_remote_address(newtransport->ws_session))), &newtransport->transport.key.rem_addr);
newtransport->transport.key.rem_addr.addr.sa_family = pj_AF_INET();
newtransport->transport.key.type = ast_websocket_is_secure(newtransport->ws_session) ? transport_type_wss : transport_type_ws;
newtransport->transport.addr_len = pj_sockaddr_get_len(&newtransport->transport.key.rem_addr);
pj_sockaddr_cp(&newtransport->transport.local_addr, &newtransport->transport.key.rem_addr);
newtransport->transport.local_name.host.ptr = (char *)pj_pool_alloc(pool, newtransport->transport.addr_len+4);
pj_sockaddr_print(&newtransport->transport.key.rem_addr, newtransport->transport.local_name.host.ptr, newtransport->transport.addr_len+4, 0);
newtransport->transport.local_name.host.slen = pj_ansi_strlen(newtransport->transport.local_name.host.ptr);
newtransport->transport.local_name.port = pj_sockaddr_get_port(&newtransport->transport.key.rem_addr);
newtransport->transport.type_name = (char *)pjsip_transport_get_type_name(newtransport->transport.key.type);
newtransport->transport.flag = pjsip_transport_get_flag_from_type((pjsip_transport_type_e)newtransport->transport.key.type);
newtransport->transport.info = (char *)pj_pool_alloc(newtransport->transport.pool, 64);
newtransport->transport.tpmgr = tpmgr;
newtransport->transport.send_msg = &ws_send_msg;
newtransport->transport.destroy = &ws_destroy;
status = pjsip_transport_register(newtransport->transport.tpmgr,
(pjsip_transport *)newtransport);
if (status != PJ_SUCCESS) {
goto on_error;
}
/* Add a reference for pjsip transport manager */
ao2_ref(newtransport, +1);
newtransport->rdata.tp_info.transport = &newtransport->transport;
newtransport->rdata.tp_info.pool = pjsip_endpt_create_pool(endpt, "rtd%p",
PJSIP_POOL_RDATA_LEN, PJSIP_POOL_RDATA_INC);
if (!newtransport->rdata.tp_info.pool) {
ast_log(LOG_ERROR, "Failed to allocate WebSocket rdata.\n");
pjsip_transport_destroy((pjsip_transport *)newtransport);
goto on_error;
}
create_data->transport = newtransport;
return 0;
on_error:
ao2_cleanup(newtransport);
return -1;
}
Example 12: Pj_Recursive_Mutex_Lock
//
// Constructor.
//
explicit Pj_Recursive_Mutex_Lock(Pj_Pool *pool, const char *name = NULL)
: Pj_Lock(NULL)
{
pj_lock_create_recursive_mutex(pool->pool_(), name, &lock_);
}
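A hypothetical usage sketch for this C++ wrapper (per the pj++ headers: Pj_Pool::attach() binds an existing pj_pool_t to the C++ wrapper, acquire()/release() are inherited from Pj_Lock, and the Pj_Lock destructor destroys the underlying lock; raw_pool is assumed to have been created elsewhere):

Pj_Pool cpp_pool;
cpp_pool.attach(raw_pool);              // wrap an existing pj_pool_t*

Pj_Recursive_Mutex_Lock lock(&cpp_pool, "demo-lock");
lock.acquire();
lock.acquire();                         // recursive: owning thread may re-lock
lock.release();
lock.release();
// the underlying pj_lock_t is destroyed when 'lock' goes out of scope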
Example 13: PJ_DEF
/* Start loop transport. */
PJ_DEF(pj_status_t) pjsip_loop_start( pjsip_endpoint *endpt,
pjsip_transport **transport)
{
pj_pool_t *pool;
struct loop_transport *loop;
pj_status_t status;
/* Create pool. */
pool = pjsip_endpt_create_pool(endpt, "loop", 4000, 4000);
if (!pool)
return PJ_ENOMEM;
/* Create the loop structure. */
loop = pj_pool_zalloc(pool, sizeof(struct loop_transport));
/* Initialize transport properties. */
pj_ansi_snprintf(loop->base.obj_name, sizeof(loop->base.obj_name),
"loop%p", loop);
loop->base.pool = pool;
status = pj_atomic_create(pool, 0, &loop->base.ref_cnt);
if (status != PJ_SUCCESS)
goto on_error;
status = pj_lock_create_recursive_mutex(pool, "loop", &loop->base.lock);
if (status != PJ_SUCCESS)
goto on_error;
loop->base.key.type = PJSIP_TRANSPORT_LOOP_DGRAM;
//loop->base.key.rem_addr.sa_family = PJ_AF_INET;
loop->base.type_name = "LOOP-DGRAM";
loop->base.info = "LOOP-DGRAM";
loop->base.flag = PJSIP_TRANSPORT_DATAGRAM;
loop->base.local_name.host = pj_str(ADDR_LOOP_DGRAM);
loop->base.local_name.port =
pjsip_transport_get_default_port_for_type(loop->base.key.type);
loop->base.addr_len = sizeof(pj_sockaddr_in);
loop->base.endpt = endpt;
loop->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
loop->base.send_msg = &loop_send_msg;
loop->base.destroy = &loop_destroy;
pj_list_init(&loop->recv_list);
pj_list_init(&loop->send_list);
/* Create worker thread. */
status = pj_thread_create(pool, "loop",
&loop_transport_worker_thread, loop, 0,
PJ_THREAD_SUSPENDED, &loop->thread);
if (status != PJ_SUCCESS)
goto on_error;
/* Register to transport manager. */
status = pjsip_transport_register( loop->base.tpmgr, &loop->base);
if (status != PJ_SUCCESS)
goto on_error;
/* Start the thread. */
status = pj_thread_resume(loop->thread);
if (status != PJ_SUCCESS)
goto on_error;
/*
* Done.
*/
if (transport)
*transport = &loop->base;
return PJ_SUCCESS;
on_error:
if (loop->base.lock)
pj_lock_destroy(loop->base.lock);
if (loop->thread)
pj_thread_destroy(loop->thread);
if (loop->base.ref_cnt)
pj_atomic_destroy(loop->base.ref_cnt);
pjsip_endpt_release_pool(endpt, loop->pool);
return status;
}
Example 14: PJ_DEF
/*
* This is the public API to create, initialize, register, and start the
* TCP listener.
*/
PJ_DEF(pj_status_t) pjsip_tcp_transport_start2(pjsip_endpoint *endpt,
const pj_sockaddr_in *local,
const pjsip_host_port *a_name,
unsigned async_cnt,
pjsip_tpfactory **p_factory)
{
pj_pool_t *pool;
pj_sock_t sock = PJ_INVALID_SOCKET;
struct tcp_listener *listener;
pj_activesock_cfg asock_cfg;
pj_activesock_cb listener_cb;
pj_sockaddr_in *listener_addr;
int addr_len;
pj_status_t status;
/* Sanity check */
PJ_ASSERT_RETURN(endpt && async_cnt, PJ_EINVAL);
/* Verify that address given in a_name (if any) is valid */
if (a_name && a_name->host.slen) {
pj_sockaddr_in tmp;
status = pj_sockaddr_in_init(&tmp, &a_name->host,
(pj_uint16_t)a_name->port);
if (status != PJ_SUCCESS || tmp.sin_addr.s_addr == PJ_INADDR_ANY ||
tmp.sin_addr.s_addr == PJ_INADDR_NONE)
{
/* Invalid address */
return PJ_EINVAL;
}
}
pool = pjsip_endpt_create_pool(endpt, "tcplis", POOL_LIS_INIT,
POOL_LIS_INC);
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
listener = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
listener->factory.pool = pool;
listener->factory.type = PJSIP_TRANSPORT_TCP;
listener->factory.type_name = "tcp";
listener->factory.flag =
pjsip_transport_get_flag_from_type(PJSIP_TRANSPORT_TCP);
pj_ansi_strcpy(listener->factory.obj_name, "tcplis");
status = pj_lock_create_recursive_mutex(pool, "tcplis",
&listener->factory.lock);
if (status != PJ_SUCCESS)
goto on_error;
/* Create and bind socket */
status = pj_sock_socket(pj_AF_INET(), pj_SOCK_STREAM(), 0, &sock);
if (status != PJ_SUCCESS)
goto on_error;
listener_addr = (pj_sockaddr_in*)&listener->factory.local_addr;
if (local) {
pj_memcpy(listener_addr, local, sizeof(pj_sockaddr_in));
} else {
pj_sockaddr_in_init(listener_addr, NULL, 0);
}
status = pj_sock_bind(sock, listener_addr, sizeof(pj_sockaddr_in));
if (status != PJ_SUCCESS)
goto on_error;
/* Retrieve the bound address */
addr_len = sizeof(pj_sockaddr_in);
status = pj_sock_getsockname(sock, listener_addr, &addr_len);
if (status != PJ_SUCCESS)
goto on_error;
/* If published host/IP is specified, then use that address as the
* listener advertised address.
*/
if (a_name && a_name->host.slen) {
/* Copy the address */
listener->factory.addr_name = *a_name;
pj_strdup(listener->factory.pool, &listener->factory.addr_name.host,
&a_name->host);
listener->factory.addr_name.port = a_name->port;
} else {
/* No published address is given, use the bound address */
/* If the address returns 0.0.0.0, use the default
* interface address as the transport's address.
*/
if (listener_addr->sin_addr.s_addr == 0) {
pj_sockaddr hostip;
status = pj_gethostip(pj_AF_INET(), &hostip);
if (status != PJ_SUCCESS)
goto on_error;
//......... (rest of the code omitted) .........
Example 15: pool_
SipIceTransport::SipIceTransport(pjsip_endpoint* endpt, pj_pool_t& /* pool */,
long /* t_type */,
const std::shared_ptr<IceTransport>& ice,
int comp_id)
: pool_(nullptr, pj_pool_release)
, rxPool_(nullptr, pj_pool_release)
, trData_()
, rdata_()
, ice_(ice)
, comp_id_(comp_id)
{
trData_.self = this;
if (not ice or not ice->isRunning())
throw std::logic_error("ice transport must exist and negotiation completed");
RING_DBG("[email protected]%p {tr=%p}", this, &trData_.base);
auto& base = trData_.base;
pool_.reset(pjsip_endpt_create_pool(endpt, "SipIceTransport.pool", POOL_TP_INIT, POOL_TP_INC));
if (not pool_)
throw std::bad_alloc();
auto pool = pool_.get();
pj_ansi_snprintf(base.obj_name, PJ_MAX_OBJ_NAME, "SipIceTransport");
base.endpt = endpt;
base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
base.pool = pool;
rdata_.tp_info.pool = pool;
// FIXME: not destroyed in case of exception
if (pj_atomic_create(pool, 0, &base.ref_cnt) != PJ_SUCCESS)
throw std::runtime_error("Can't create PJSIP atomic.");
// FIXME: not destroyed in case of exception
if (pj_lock_create_recursive_mutex(pool, "SipIceTransport.mutex", &base.lock) != PJ_SUCCESS)
throw std::runtime_error("Can't create PJSIP mutex.");
auto remote = ice->getRemoteAddress(comp_id);
RING_DBG("SipIceTransport: remote is %s", remote.toString(true).c_str());
pj_sockaddr_cp(&base.key.rem_addr, remote.pjPtr());
base.key.type = PJSIP_TRANSPORT_UDP;//t_type;
base.type_name = (char*)pjsip_transport_get_type_name((pjsip_transport_type_e)base.key.type);
base.flag = pjsip_transport_get_flag_from_type((pjsip_transport_type_e)base.key.type);
base.info = (char*) pj_pool_alloc(pool, TRANSPORT_INFO_LENGTH);
char print_addr[PJ_INET6_ADDRSTRLEN+10];
pj_ansi_snprintf(base.info, TRANSPORT_INFO_LENGTH, "%s to %s",
base.type_name,
pj_sockaddr_print(remote.pjPtr(), print_addr,
sizeof(print_addr), 3));
base.addr_len = remote.getLength();
base.dir = PJSIP_TP_DIR_NONE;//is_server? PJSIP_TP_DIR_INCOMING : PJSIP_TP_DIR_OUTGOING;
base.data = nullptr;
/* Set initial local address */
auto local = ice->getDefaultLocalAddress();
pj_sockaddr_cp(&base.local_addr, local.pjPtr());
sockaddr_to_host_port(pool, &base.local_name, &base.local_addr);
sockaddr_to_host_port(pool, &base.remote_name, remote.pjPtr());
base.send_msg = [](pjsip_transport *transport,
pjsip_tx_data *tdata,
const pj_sockaddr_t *rem_addr, int addr_len,
void *token, pjsip_transport_callback callback) {
auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
return this_->send(tdata, rem_addr, addr_len, token, callback);
};
base.do_shutdown = [](pjsip_transport *transport) -> pj_status_t {
auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
RING_WARN("[email protected]%p: shutdown", this_);
return PJ_SUCCESS;
};
base.destroy = [](pjsip_transport *transport) -> pj_status_t {
auto& this_ = reinterpret_cast<TransportData*>(transport)->self;
RING_WARN("[email protected]%p: destroy", this_);
delete this_;
return PJ_SUCCESS;
};
/* Init rdata */
rxPool_.reset(pjsip_endpt_create_pool(base.endpt,
"SipIceTransport.rtd%p",
PJSIP_POOL_RDATA_LEN,
PJSIP_POOL_RDATA_INC));
if (not rxPool_)
throw std::bad_alloc();
auto rx_pool = rxPool_.get();
rdata_.tp_info.pool = rx_pool;
rdata_.tp_info.transport = &base;
rdata_.tp_info.tp_data = this;
rdata_.tp_info.op_key.rdata = &rdata_;
pj_ioqueue_op_key_init(&rdata_.tp_info.op_key.op_key, sizeof(pj_ioqueue_op_key_t));
rdata_.pkt_info.src_addr = base.key.rem_addr;
rdata_.pkt_info.src_addr_len = sizeof(rdata_.pkt_info.src_addr);
auto rem_addr = &base.key.rem_addr;
pj_sockaddr_print(rem_addr, rdata_.pkt_info.src_name, sizeof(rdata_.pkt_info.src_name), 0);
//......... (rest of the code omitted) .........