This article collects typical usage examples of the zmq_errno function in C++. If you are wondering what exactly zmq_errno does, how to call it, or want to see it used in real code, the curated examples below may help.
A total of 15 zmq_errno code examples are shown below, ordered by popularity by default.
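Before looking at the examples, the basic pattern is worth spelling out: after a libzmq call returns -1, zmq_errno() retrieves the error number for the calling thread, and zmq_strerror() turns that number into a readable message. The minimal sketch below illustrates only this pattern; the deliberately invalid "bogus://" endpoint is a placeholder used to force an error.

// Minimal sketch: force an error (unsupported transport) and report it
// with zmq_errno()/zmq_strerror().
#include <zmq.h>
#include <stdio.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *sock = zmq_socket (ctx, ZMQ_REQ);

    // "bogus://" is not a valid transport, so zmq_connect returns -1.
    if (zmq_connect (sock, "bogus://endpoint") == -1) {
        int err = zmq_errno ();
        fprintf (stderr, "connect failed: %s (errno %d)\n", zmq_strerror (err), err);
    }

    zmq_close (sock);
    zmq_ctx_term (ctx);
    return 0;
}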
Example 1: MsgAssert
static void MsgAssert(bool condition)
{
    if (!condition)
    {
        // Log the ZeroMQ error code before asserting, so the failure reason
        // is visible in debug builds as well.
        int error_code = zmq_errno();
        printf("error:%d\n", error_code);
    }
    assert(condition);
}
Example 2: run_test
template <class T>
static void run_test (int opt, T optval, int expected_error, int bounce_test)
{
    int rc;
    void *ctx = zmq_ctx_new ();
    assert (ctx);

    void *sb = zmq_socket (ctx, ZMQ_DEALER);
    assert (sb);
    if (opt) {
        rc = zmq_setsockopt (sb, opt, &optval, sizeof (optval));
        if (expected_error) {
            assert (rc == -1);
            assert (zmq_errno () == expected_error);
        } else {
            assert (rc == 0);
        }
    }

    void *sc = zmq_socket (ctx, ZMQ_DEALER);
    assert (sc);

    // If a test fails, don't hang for too long
    int timeout = 2500;
    rc = zmq_setsockopt (sb, ZMQ_RCVTIMEO, &timeout, sizeof (int));
    assert (rc == 0);
    rc = zmq_setsockopt (sb, ZMQ_SNDTIMEO, &timeout, sizeof (int));
    assert (rc == 0);
    rc = zmq_setsockopt (sc, ZMQ_RCVTIMEO, &timeout, sizeof (int));
    assert (rc == 0);
    rc = zmq_setsockopt (sc, ZMQ_SNDTIMEO, &timeout, sizeof (int));
    assert (rc == 0);

    int interval = -1;
    rc = zmq_setsockopt (sc, ZMQ_RECONNECT_IVL, &interval, sizeof (int));
    assert (rc == 0);

    if (bounce_test) {
        const char *endpoint = "ipc://test_filter_ipc.sock";
        rc = zmq_bind (sb, endpoint);
        assert (rc == 0);

        rc = zmq_connect (sc, endpoint);
        assert (rc == 0);

        if (bounce_test > 0)
            bounce (sb, sc);
        else
            bounce_fail (sb, sc);
    }

    close_zero_linger (sc);
    close_zero_linger (sb);

    rc = zmq_ctx_term (ctx);
    assert (rc == 0);
}
Example 3: send
inline size_t send (const void *buf_, size_t len_, int flags_ = 0)
{
    int nbytes = zmq_send (ptr, buf_, len_, flags_);
    if (nbytes >= 0)
        return (size_t) nbytes;
    if (zmq_errno () == EAGAIN)
        return 0;
    throw error_t ();
}
Example 4: Lzmq_push_error
static int Lzmq_push_error(lua_State *L)
{
    const char *error;

    lua_pushnil(L);
    switch (zmq_errno()) {
    case EAGAIN:
        lua_pushliteral(L, "timeout");
        break;
    case ETERM:
        lua_pushliteral(L, "closed");
        break;
    default:
        error = zmq_strerror(zmq_errno());
        lua_pushlstring(L, error, strlen(error));
        break;
    }
    return 2;
}
Example 5: finalize_context
// Finalizer for context
void finalize_context(value v) {
    gc_enter_blocking();
    int ret = zmq_term(val_data(v));
    gc_exit_blocking();
    if (ret != 0) {
        int err = zmq_errno();
        val_throw(alloc_int(err));
    }
}
Example 6: pollitem_on_output
static int pollitem_on_output(void *socket, void *ehub, void *data) {
    int r;
    char *message = ehub_on_output(ehub, socket, data);
    r = zmq_send(socket, message, strlen(message), 0);
    if (r == -1) return zmq_errno();
    return 0;
}
Example 7: recv
inline bool recv (message_t *msg_, int flags_ = 0)
{
    int nbytes = zmq_msg_recv (&(msg_->msg), ptr, flags_);
    if (nbytes >= 0)
        return true;
    if (zmq_errno () == EAGAIN)
        return false;
    throw error_t ();
}
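Examples 3 and 7 above come from a cppzmq-style C++ wrapper (zmq.hpp): when a non-blocking call fails with EAGAIN, the wrapper reports 0 bytes sent or false instead of throwing error_t, so an empty queue is handled as a normal condition. The usage sketch below assumes that wrapper; the zmq::context_t / zmq::socket_t / zmq::message_t names follow the classic cppzmq convention, and the endpoint is arbitrary.

// Sketch assuming the cppzmq-style wrapper shown in Examples 3 and 7 (zmq.hpp).
#include <zmq.hpp>
#include <iostream>

int main ()
{
    zmq::context_t ctx (1);
    zmq::socket_t sock (ctx, ZMQ_PULL);
    sock.bind ("tcp://127.0.0.1:5599"); // placeholder endpoint

    zmq::message_t msg;
    // With ZMQ_DONTWAIT an empty queue comes back as 'false' (the wrapper maps
    // EAGAIN to false); any other error still throws zmq::error_t.
    if (!sock.recv (&msg, ZMQ_DONTWAIT))
        std::cout << "no message available yet" << std::endl;

    return 0;
}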
Example 8: test_send_one_connected_one_unconnected
void test_send_one_connected_one_unconnected ()
{
    int val;

    // TEST 1.
    // First we're going to attempt to send messages to two
    // pipes, one connected, the other not. We should see
    // the PUSH load balancing to both pipes, and hence half
    // of the messages getting queued, as connect() creates a
    // pipe immediately.
    void *to = test_context_socket (ZMQ_PULL);

    // Bind the one valid receiver
    val = 0;
    TEST_ASSERT_SUCCESS_ERRNO (
      zmq_setsockopt (to, ZMQ_LINGER, &val, sizeof (val)));
    TEST_ASSERT_SUCCESS_ERRNO (zmq_bind (to, "tipc://{6555,0,0}"));

    // Create a socket pushing to two endpoints - only 1 message should arrive.
    void *from = test_context_socket (ZMQ_PUSH);

    val = 0;
    TEST_ASSERT_SUCCESS_ERRNO (
      zmq_setsockopt (from, ZMQ_LINGER, &val, sizeof (val)));
    // This pipe will not connect
    TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (from, "tipc://{5556,0}@0.0.0"));
    // This pipe will
    TEST_ASSERT_SUCCESS_ERRNO (zmq_connect (from, "tipc://{6555,0}@0.0.0"));

    // We send 10 messages, 5 should just get stuck in the queue
    // for the not-yet-connected pipe
    const int send_count = 10;
    for (int i = 0; i < send_count; ++i) {
        send_string_expect_success (from, "Hello", 0);
    }

    // We now consume from the connected pipe
    // - we should see just 5
    int timeout = SETTLE_TIME;
    TEST_ASSERT_SUCCESS_ERRNO (
      zmq_setsockopt (to, ZMQ_RCVTIMEO, &timeout, sizeof (int)));

    int seen = 0;
    while (true) {
        char buffer[16];
        int rc = zmq_recv (to, &buffer, sizeof (buffer), 0);
        if (rc == -1) {
            TEST_ASSERT_EQUAL_INT (EAGAIN, zmq_errno ());
            break; // Break when we didn't get a message
        }
        seen++;
    }
    TEST_ASSERT_EQUAL_INT (send_count / 2, seen);

    test_context_socket_close (from);
    test_context_socket_close (to);
}
Example 9: zmq_errno
OpenEphysNetworkEventsClient::~OpenEphysNetworkEventsClient() {
    if (zmqSocket) {
        if (0 != zmq_disconnect(zmqSocket.get(), endpoint.c_str()) &&
            ENOENT != zmq_errno())
        {
            logZMQError("Unable to disconnect from Open Ephys network events module");
        }
    }
}
Example 10: main
int main (void)
{
    setup_test_environment ();
    size_t len = MAX_SOCKET_STRING;
    char my_endpoint[MAX_SOCKET_STRING];
    void *ctx = zmq_ctx_new ();
    assert (ctx);

    // Spawn ZAP handler
    // We create and bind ZAP socket in main thread to avoid case
    // where child thread does not start up fast enough.
    void *handler = zmq_socket (ctx, ZMQ_REP);
    assert (handler);
    int rc = zmq_bind (handler, "inproc://zeromq.zap.01");
    assert (rc == 0);
    void *zap_thread = zmq_threadstart (&zap_handler, handler);

    void *server = zmq_socket (ctx, ZMQ_DEALER);
    assert (server);
    void *client = zmq_socket (ctx, ZMQ_DEALER);
    assert (client);

    rc = zmq_setsockopt (server, ZMQ_ZAP_DOMAIN, "DOMAIN", 6);
    assert (rc == 0);
    rc = zmq_bind (server, "tcp://127.0.0.1:*");
    assert (rc == 0);
    rc = zmq_getsockopt (server, ZMQ_LAST_ENDPOINT, my_endpoint, &len);
    assert (rc == 0);
    rc = zmq_connect (client, my_endpoint);
    assert (rc == 0);

    s_send (client, "This is a message");

    zmq_msg_t msg;
    zmq_msg_init (&msg);
    rc = zmq_msg_recv (&msg, server, 0);
    assert (rc != -1);
    assert (streq (zmq_msg_gets (&msg, "Hello"), "World"));
    assert (streq (zmq_msg_gets (&msg, "Socket-Type"), "DEALER"));
    assert (streq (zmq_msg_gets (&msg, "User-Id"), "anonymous"));
    assert (streq (zmq_msg_gets (&msg, "Peer-Address"), "127.0.0.1"));
    assert (zmq_msg_gets (&msg, "No Such") == NULL);
    assert (zmq_errno () == EINVAL);
    zmq_msg_close (&msg);

    close_zero_linger (client);
    close_zero_linger (server);

    // Shutdown
    rc = zmq_ctx_term (ctx);
    assert (rc == 0);

    // Wait until ZAP handler terminates
    zmq_threadclose (zap_thread);

    return 0;
}
Example 11: wrap_zmq_term
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
static void
wrap_zmq_term(zmq_drv_t *drv)
{
    zmqdrv_fprintf("term %p\r\n", drv->zmq_context);

    if (0 < drv->zmq_pid_socket.size())
    {
        for (zmq_pid_socket_map_t::iterator it = drv->zmq_pid_socket.begin(); it != drv->zmq_pid_socket.end(); ++it)
        {
            zmq_sock_info* si = it->second;

            if (si->busy)
            {
                // Remove socket from erlang vm polling
                driver_select(drv->port, si->fd, ERL_DRV_READ, 0);

                if (si->out_caller)
                {
                    reply_error(drv->port, si->out_caller, ETERM);
                    si->out_caller = 0;
                    zmq_msg_close(&si->out_msg);
                }
                if (si->in_caller)
                {
                    reply_error(drv->port, si->in_caller, ETERM);
                    si->in_caller = 0;
                }
                if (si->poll_caller)
                {
                    send_events(drv->port, si->poll_caller, (uint32_t)ZMQ_POLLERR);
                    si->poll_caller = 0;
                }

                si->busy = false;
            }
        }

        // TODO: Remove if zeromq itself ever gets fixed. zmq_term() is a
        // blocking call and will not return until all sockets are closed,
        // so do not allow it to be called while there are open sockets.
        drv->terminating = true;
        reply_error(drv->port, driver_caller(drv->port), EAGAIN);
        return;
    }

    // cross fingers and hope zmq_term() doesn't block, else we hardlock.
    if (0 != zmq_term(drv->zmq_context))
    {
        reply_error(drv->port, driver_caller(drv->port), zmq_errno());
        return;
    }

    drv->zmq_context = NULL;
    reply_ok(drv->port, driver_caller(drv->port));
}
Example 12: zmq_msg_init
void * teRDNetwork::GetData(u32 & dataSize)
{
    if(!zmqContext)
        return NULL;

    zmq_msg_init(&getDataRequest);

    // Note: this project targets the legacy ZeroMQ 2.x API, where
    // zmq_recv(socket, msg, flags) takes a zmq_msg_t and returns 0 on success.
    s32 errCode = zmq_recv(zmqDataDrop, &getDataRequest, 0);
    if(errCode != 0)
    {
        if(zmq_errno() != EAGAIN)
            TE_LOG_WRN("0mq recv data err %i : %i", errCode, zmq_errno());
        return NULL;
    }

    dataSize = zmq_msg_size(&getDataRequest);
    return zmq_msg_data(&getDataRequest);
}
Example 13: zmqdrv_recv
static void
zmqdrv_recv(zmq_drv_t *drv, ErlIOVec *ev)
{
    ErlDrvBinary* bin = ev->binv[1];
    char* bytes = bin->orig_bytes;
    uint32_t idx = ntohl(*(uint32_t*)(bytes+1));
    zmq_sock_info* si = drv->get_socket_info(idx);

    if (idx > drv->zmq_socket_count || !si) {
        zmqdrv_error_code(drv, ENODEV);
        return;
    }

    if (si->active_mode) {
        zmqdrv_error_code(drv, EINVAL);
        return;
    }

    if (si->in_caller != 0) {
        // Previous recv() call in passive mode didn't complete.
        // The owner must be blocked waiting for result.
        zmqdrv_error_code(drv, EBUSY);
        return;
    }

    uint32_t events;
    size_t events_size = sizeof(events);
    zmq_getsockopt(si->socket, ZMQ_EVENTS, &events, &events_size);

    if (events == 0)
        si->in_caller = driver_caller(drv->port);
    else {
        msg_t msg;

        if (zmq_recv(si->socket, &msg, ZMQ_NOBLOCK) == 0)
            zmqdrv_ok_binary(drv, driver_caller(drv->port), zmq_msg_data(&msg), zmq_msg_size(&msg));
        else if (zmq_errno() == EAGAIN) {
            // No input available. Make the caller wait by not returning result
            si->in_caller = driver_caller(drv->port);
        } else
            zmqdrv_error_code(drv, zmq_errno());
    }
}
Example 14: zmq_strerror
effBOOL EFFNetServer::Bind(effString address)
{
    if ( zmq_bind(socket, EFFSTRING2ANSI(address)) == -1 )
    {
        std::string error = zmq_strerror(zmq_errno());
        return effFALSE;
    }

    return effTRUE;
}
Example 15: accept
void zmq::tcp_listener_t::in_event ()
{
    fd_t fd = accept ();

    // If connection was reset by the peer in the meantime, just ignore it.
    // TODO: Handle specific errors like ENFILE/EMFILE etc.
    if (fd == retired_fd) {
        socket->event_accept_failed (endpoint, zmq_errno ());
        return;
    }

    int rc = tune_tcp_socket (fd);
    rc = rc
         | tune_tcp_keepalives (
             fd, options.tcp_keepalive, options.tcp_keepalive_cnt,
             options.tcp_keepalive_idle, options.tcp_keepalive_intvl);
    rc = rc | tune_tcp_maxrt (fd, options.tcp_maxrt);
    if (rc != 0) {
        socket->event_accept_failed (endpoint, zmq_errno ());
        return;
    }

    // Create the engine object for this connection.
    stream_engine_t *engine =
      new (std::nothrow) stream_engine_t (fd, options, endpoint);
    alloc_assert (engine);

    // Choose I/O thread to run connecter in. Given that we are already
    // running in an I/O thread, there must be at least one available.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    zmq_assert (io_thread);

    // Create and launch a session object.
    session_base_t *session =
      session_base_t::create (io_thread, false, socket, options, NULL);
    errno_assert (session);
    session->inc_seqnum ();
    launch_child (session);
    send_attach (session, engine, false);
    socket->event_accepted (endpoint, (int) fd);
}