本文整理汇总了C++中TAOLIB_DEBUG函数的典型用法代码示例。如果您正苦于以下问题:C++ TAOLIB_DEBUG函数的具体用法?C++ TAOLIB_DEBUG怎么用?C++ TAOLIB_DEBUG使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了TAOLIB_DEBUG函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ACE_GUARD_RETURN
/// Associate a reply dispatcher with the given request id in the
/// dispatcher table.  Returns 0 on success (or for a null dispatcher,
/// which is merely logged), -1 if the table bind fails.
int
TAO_Muxed_TMS::bind_dispatcher (CORBA::ULong request_id,
                                ACE_Intrusive_Auto_Ptr<TAO_Reply_Dispatcher> rd)
{
  // Serialize all access to the dispatcher table.
  ACE_GUARD_RETURN (ACE_Lock,
                    ace_mon,
                    *this->lock_,
                    -1);

  // A null dispatcher is not treated as a hard error: report and bail.
  if (rd == 0)
    {
      if (TAO_debug_level > 0)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::bind_dispatcher, ")
                         ACE_TEXT ("null reply dispatcher\n")));
        }

      return 0;
    }

  int const bind_status = this->dispatcher_table_.bind (request_id, rd);

  if (bind_status == 0)
    return 0;

  if (TAO_debug_level > 0)
    TAOLIB_DEBUG ((LM_DEBUG,
                   ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::bind_dispatcher, ")
                   ACE_TEXT ("bind dispatcher failed: result = %d, request id = %d\n"),
                   bind_status, request_id));

  return -1;
}
示例2: TAOLIB_DEBUG
/// ORBInitializer post-initialization hook: resolves the
/// RTCORBA::Current reference from the ORB being initialized and
/// caches it in this->current_.  Throws CORBA::INTERNAL if the
/// reference cannot be narrowed.
void
TAO_RTScheduler_ORB_Initializer::post_init (PortableInterceptor::ORBInitInfo_ptr info)
{
  // @@ This is busted. TAO_ORBInitInfo should do proper reference
  // counting.
  // Narrow to a TAO_ORBInitInfo object to get access to the
  // orb_core() TAO extension.
  //TAO_ORBInitInfo_var tao_info = TAO_ORBInitInfo::_narrow (info);

  if (TAO_debug_level > 0)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "In post_init\n"));

  // Resolve the RT CORBA "current" object from the initializing ORB.
  CORBA::Object_var rt_current_obj =
    info->resolve_initial_references (TAO_OBJID_RTCURRENT);

  RTCORBA::Current_var rt_current =
    RTCORBA::Current::_narrow (rt_current_obj.in ());

  if (CORBA::is_nil (rt_current.in ()))
    {
      // Without RTCORBA::Current the scheduler cannot function, so
      // fail ORB initialization outright.
      TAOLIB_DEBUG ((LM_DEBUG,
                     "(%P|%t) ::post_init\n"
                     "(%P|%t) Unable to narrow to RTCORBA::Current\n"));
      throw ::CORBA::INTERNAL ();
    }

  // Hand the narrowed reference to our scheduler-current object.
  this->current_->rt_current (rt_current.in ());
}
示例3: TAOLIB_DEBUG
/// Linearly map a CORBA priority in [RTCORBA::minPriority,
/// RTCORBA::maxPriority] onto an entry of the dscp[] table and return
/// it through @a network_priority.  Always returns 1 (true).
CORBA::Boolean
TAO_Linear_Network_Priority_Mapping::to_network (
  RTCORBA::Priority corba_priority,
  RTCORBA::NetworkPriority &network_priority)
{
  if (TAO_debug_level)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "TAO_Linear_Network_Priority_Mapping::to_network corba_priority %d\n",
                   corba_priority));

  int const total_slots = sizeof (dscp) / sizeof (int);

  // Scale the priority's position within the CORBA priority range
  // onto [0, total_slots].
  int array_slot =
    static_cast<int> (((corba_priority - RTCORBA::minPriority) / double (RTCORBA::maxPriority - RTCORBA::minPriority)) * total_slots);

  // Clamp to the table bounds.  The original code only handled
  // corba_priority == maxPriority (array_slot == total_slots); a
  // priority outside [minPriority, maxPriority] would have indexed
  // dscp[] out of bounds.  Clamping keeps the mapping total and safe
  // while leaving in-range inputs unchanged.
  if (array_slot < 0)
    array_slot = 0;
  else if (array_slot >= total_slots)
    array_slot = total_slots - 1;

  network_priority = dscp[array_slot];

  if (TAO_debug_level)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "TAO_Linear_Network_Priority_Mapping::to_network = %x\n",
                   network_priority));

  return 1;
}
示例4: VERIFY_MAP
/// Marshal a valuetype repository id into @a strm.  When valuetype
/// output indirection is compiled in, a repeated id is encoded as an
/// indirection tag plus a negative offset back to its first
/// occurrence; a first occurrence is recorded in the stream's
/// repo-id map and written inline.  Returns true/1 on success,
/// false/0 if a CDR write fails; throws CORBA::INTERNAL on alignment
/// or map-bind failure.
CORBA::Boolean
CORBA::ValueBase::_tao_write_repository_id (TAO_OutputCDR &strm,
                                            ACE_CString& id)
{
#ifdef TAO_HAS_VALUETYPE_OUT_INDIRECTION
  // Lazily create the per-stream map of repository id -> stream position.
  VERIFY_MAP (TAO_OutputCDR, repo_id_map, Repo_Id_Map);
  char* pos = 0;
  if (strm.get_repo_id_map ()->get()->find (id, pos) == 0)
    {
      // Already written once: emit the indirection marker ...
      if (!strm.write_long (TAO_OBV_GIOP_Flags::Indirection_tag))
        {
          return false;
        }

      // ... followed by the (negative) offset back to the original
      // occurrence of the id in the stream.
      CORBA::Long offset= -strm.offset (pos);

      if (TAO_debug_level)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_write_repository_id, id %C indirection %d\n"),
                         id.c_str(), offset));
        }

      if (!strm.write_long (offset))
        {
          return false;
        }
    }
  else
    {
      // First occurrence: align to the long boundary first so the
      // recorded position matches where the id data actually starts.
      if (strm.align_write_ptr (ACE_CDR::LONG_SIZE) != 0)
        {
          throw CORBA::INTERNAL ();
        }

      // Remember this position so later writes of the same id can
      // indirect back to it.
      if (strm.get_repo_id_map ()->get ()->bind (id, strm.current()->wr_ptr ()) != 0)
        {
          throw CORBA::INTERNAL ();
        }

      if (TAO_debug_level)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_write_repository_id, bound %C - %x\n"),
                         id.c_str (), strm.current()->wr_ptr ()));
        }

      if (! strm.write_string (id.c_str ()))
        {
          return false;
        }
    }
#else
  // Indirection support compiled out: always write the id inline.
  if (! strm.write_string (id.c_str ()))
    {
      return 0;
    }
#endif
  return 1;
}
示例5: TAOLIB_DEBUG
/// Shared input-handling path.  Runs the pre-I/O hook, delegates the
/// actual read to the transport, runs the post-I/O hook, and lets the
/// resume handle adjust the final return value.  On -1 the handle is
/// flagged to stay suspended.
int
TAO_Connection_Handler::handle_input_internal (
    ACE_HANDLE h, ACE_Event_Handler * eh)
{
  // Let the transport know that it is used
  (void) this->transport ()->update_transport ();

  // Grab the transport id now and use the cached value for printing
  // since the transport could disappear by the time the thread
  // returns.
  size_t const t_id = this->transport ()->id ();

  if (TAO_debug_level > 6)
    {
      ACE_HANDLE const handle = eh->get_handle();
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - Connection_Handler[%d]::handle_input_internal, "
                     "handle = %d/%d\n",
                     t_id, handle, h));
    }

  // Scoped object governing resumption of the reactor handle
  // (presumably resumes on destruction unless flagged otherwise --
  // confirm against TAO_Resume_Handle).
  TAO_Resume_Handle resume_handle (this->orb_core (), eh->get_handle ());

  int return_value = 0;
  // Give subclasses a chance to short-circuit the read entirely.
  this->pre_io_hook (return_value);
  if (return_value != 0)
    return return_value;

  return_value = this->transport ()->handle_input (resume_handle);
  // Post-I/O counterpart of pre_io_hook.  NOTE: "pos_io_hook" (not
  // "post") is the actual method name in this code base.
  this->pos_io_hook (return_value);

  // Bug 1647; might need to change resume_handle's flag or
  // change handle_input return value.
  resume_handle.handle_input_return_value_hook(return_value);

  if (TAO_debug_level > 6)
    {
      ACE_HANDLE const handle = eh->get_handle ();
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - Connection_Handler[%d]::handle_input_internal, "
                     "handle = %d/%d, retval = %d\n",
                     t_id, handle, h, return_value));
    }

  if (return_value == -1)
    {
      // On error, leave the handle suspended so the reactor does not
      // dispatch it again while the connection is torn down.
      resume_handle.set_flag (TAO_Resume_Handle::TAO_HANDLE_LEAVE_SUSPENDED);
    }

  return return_value;
}
示例6: ACE_GUARD_RETURN
/// Enqueue @a request on the dynamic thread pool's queue and wake a
/// worker.  Returns false (leaving the request un-queued) when the
/// pool is not accepting requests or the configured queue depth limit
/// has been reached.
bool
TAO_DTP_Task::add_request (TAO::CSD::TP_Request* request)
{
  {
    // Phase 1: queue manipulation, guarded by the queue lock.
    ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->queue_lock_, false);

    // Optimistically count this request; rolled back below if the
    // request is rejected.
    ++this->num_queue_requests_;

    // A max_request_queue_depth_ of 0 means "no depth limit".
    if ((this->num_queue_requests_ > this->max_request_queue_depth_) &&
        (this->max_request_queue_depth_ != 0))
      {
        this->accepting_requests_ = false;
      }

    if (!this->accepting_requests_)
      {
        if (TAO_debug_level > 4)
          {
            TAOLIB_DEBUG ((LM_DEBUG,
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("not accepting requests.\n")
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("num_queue_requests_ : [%d]\n")
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("max_request_queue_depth_ : [%d]\n"),
                           this->num_queue_requests_,
                           this->max_request_queue_depth_));
          }
        // Undo the optimistic increment for the rejected request.
        --this->num_queue_requests_;
        return false;
      }

    // We have made the decision that the request is going to be placed upon
    // the queue_.  Inform the request that it is about to be placed into
    // a request queue.  Some requests may not need to do anything in
    // preparation of being placed into a queue.  Others, however, may need
    // to perfom a "clone" operation on some underlying request data before
    // the request can be properly placed into a queue.
    request->prepare_for_queue();

    this->queue_.put(request);
  }
  {
    // Phase 2: signal worker threads, guarded by the separate work lock.
    ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->work_lock_, false);
    this->check_queue_ = true;
    this->work_available_.signal ();

    if (TAO_debug_level > 4 )
      {
        TAOLIB_DEBUG((LM_DEBUG,
                      ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() - ")
                      ACE_TEXT ("work available\n")));
      }
  }
  return true;
}
示例7: rd
/// Handle a timed-out reply for @a request_id: remove its dispatcher
/// from the table and notify it.  Returns 0 both on successful
/// dispatch and when no dispatcher was registered (the reply is
/// simply no longer wanted); -1 only if the lock cannot be acquired.
int
TAO_Muxed_TMS::reply_timed_out (CORBA::ULong request_id)
{
  int result = 0;
  ACE_Intrusive_Auto_Ptr<TAO_Reply_Dispatcher> rd(0);

  // Grab the reply dispatcher for this id.
  {
    // Hold the table lock only while unbinding; after the guard is
    // released, the intrusive auto pointer is what keeps the
    // dispatcher alive.
    ACE_GUARD_RETURN (ACE_Lock,
                      ace_mon,
                      *this->lock_,
                      -1);
    result = this->dispatcher_table_.unbind (request_id, rd);
  }

  if (result == 0 && rd)
    {
      if (TAO_debug_level > 8)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::reply_timed_out, ")
                         ACE_TEXT ("id = %d\n"),
                         request_id));
        }

      // NOTE(review): the original comment here claimed the dispatch
      // happens "with the lock held", but the guard above is released
      // at the end of its scope; the intrusive auto pointer appears to
      // be what protects rd from a concurrent stack-unwind in a
      // follower thread -- confirm against the upstream TAO sources.
      // Dispatch the reply.
      rd->reply_timed_out ();
    }
  else
    {
      if (TAO_debug_level > 0)
        TAOLIB_DEBUG ((LM_DEBUG,
                       ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::reply_timed_out, ")
                       ACE_TEXT ("unbind dispatcher failed, id %d: result = %d\n"),
                       request_id,
                       result));

      // No registered reply handler was found: either the reply was
      // not ours - just forget about it - or it was ours, but the
      // reply timed out - just forget about the reply.  Report
      // success either way.
      result = 0;
    }

  return result;
}
示例8: defined
/// Dump the singleton's state (its instance pointer) to the log.
/// Compiles to a no-op unless ACE_HAS_DUMP is defined.
template <class TYPE, class ACE_LOCK> void
TAO_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
  ACE_TRACE ("TAO_Singleton<TYPE, ACE_LOCK>::dump");

#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
  // %@ formats a pointer.  Presumably instance_i () only reads the
  // cached instance here rather than creating one -- confirm against
  // TAO_Singleton::instance_i.
  TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %@"),
                 TAO_Singleton<TYPE, ACE_LOCK>::instance_i ()));
  TAOLIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
示例9: TAOLIB_DEBUG
/// POA-activation hook: on first activation, pull the thread-pool
/// configuration from the DTP_Config_Registry service (falling back to
/// a default-constructed configuration with a warning when the named
/// entry is missing), then open the task, which activates the worker
/// threads and waits until all have started.  Returns true on success.
bool
TAO_DTP_POA_Strategy::poa_activated_event_i (TAO_ORB_Core& orb_core)
{
  this->dtp_task_.thr_mgr (orb_core.thr_mgr ());

  if (!this->config_initialized_)
    {
      TAO_DTP_Config_Registry * registry =
        ACE_Dynamic_Service<TAO_DTP_Config_Registry>::instance
        ("DTP_Config_Registry");

      if (registry == 0)
        {
          // The configuration service is mandatory; give up.
          if (TAO_debug_level > 0)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                             ACE_TEXT ("cannot retrieve configuration repo\n")));
            }
          return false;
        }

      // If the named entry is absent, settings keeps its
      // default-constructed values and we warn about it.
      TAO_DTP_Definition settings;
      if (!registry->find (this->dynamic_tp_config_name_, settings))
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                         ACE_TEXT ("warning: config not found...using ")
                         ACE_TEXT ("defaults!\n")));
        }

      // Apply the (found or default) configuration to the task.
      this->set_dtp_config (settings);
    }

  return (this->dtp_task_.open () == 0);
}
示例10: TAOLIB_DEBUG
/// Receive up to @a len bytes of a datagram from the DIOP peer.
/// Returns the number of bytes read, 0 when the read would block,
/// or -1 on any other error (including a zero-byte read).  The
/// sender's address is remembered for addressing the reply.
ssize_t
TAO_DIOP_Transport::recv (char *buf,
                          size_t len,
                          const ACE_Time_Value * /* max_wait_time */)
{
  ACE_INET_Addr from_addr;

  ssize_t const n = this->connection_handler_->peer ().recv (buf, len, from_addr);

  if (TAO_debug_level > 0)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - DIOP_Transport::recv, received %d bytes from %C:%d %d\n",
                     n,
                     from_addr.get_host_name (),
                     from_addr.get_port_number (),
                     ACE_ERRNO_GET));
    }

  // Most of the errors handling is common for
  // Now the message has been read
  if (n == -1 && TAO_debug_level > 4)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - DIOP_Transport::recv, %p\n"),
                     ACE_TEXT ("TAO - read message failure ")
                     ACE_TEXT ("recv ()\n")));
    }

  // Error handling
  if (n == -1)
    {
      // A would-block is not an error to the caller: report "no data".
      if (errno == EWOULDBLOCK)
        return 0;

      return -1;
    }
  // @@ What are the other error handling here??
  // A zero-byte read is treated as a failure.
  else if (n == 0)
    {
      return -1;
    }

  // Remember the from addr to eventually use it as remote
  // addr for the reply.
  this->connection_handler_->addr (from_addr);

  return n;
}
示例11: defined
/// Set the IP type-of-service / DSCP codepoint on the peer socket
/// (IPv6 path shown uses IPV6_TCLASS when available).
/// NOTE(review): this example is truncated by the source page -- the
/// trailing "else" has no body and the function never closes.  Code
/// below is reproduced verbatim; it will not compile as-is.
int
TAO_IIOP_Connection_Handler::set_tos (int tos)
{
  // Only touch the socket when the requested codepoint differs from
  // the one currently cached.
  if (tos != this->dscp_codepoint_)
    {
      int result = 0;
#if defined (ACE_HAS_IPV6)
      ACE_INET_Addr local_addr;
      if (this->peer ().get_local_addr (local_addr) == -1)
        return -1;
      else if (local_addr.get_type () == AF_INET6)
# if !defined (IPV6_TCLASS)
      // IPv6 defines option IPV6_TCLASS for specifying traffic class/priority
      // but not many implementations yet (very new;-).
        {
          if (TAO_debug_level)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT("TAO (%P|%t) - IIOP_Connection_Handler::")
                             ACE_TEXT("set_dscp_codepoint -> IPV6_TCLASS not supported yet\n")));
            }
          return 0;
        }
# else /* !IPV6_TCLASS */
        result = this->peer ().set_option (IPPROTO_IPV6,
                                           IPV6_TCLASS,
                                           (int *) &tos ,
                                           (int) sizeof (tos));
      else
示例12: safeguard
/// Reactor timeout upcall.  Not used for I/O here: the Connector uses
/// this to indicate the connection attempt timed out, so the handler
/// closes the connection and records the LFS_TIMEOUT state.  Returns
/// whatever close() returned.
int
TAO_IIOP_Connection_Handler::handle_timeout (const ACE_Time_Value &,
                                             const void *)
{
  // Using this to ensure this instance will be deleted (if necessary)
  // only after reset_state(). Without this, when this refcount==1 -
  // the call to close() will cause a call to remove_reference() which
  // will delete this. At that point this->reset_state() is in no
  // man's territory and that causes SEGV on some platforms (Windows!)
  TAO_Auto_Reference<TAO_IIOP_Connection_Handler> safeguard (*this);

  // NOTE: Perhaps not the best solution, as it feels like the upper
  // layers should be responsible for this?

  // We don't use this upcall for I/O. This is only used by the
  // Connector to indicate that the connection timedout. Therefore,
  // we should call close()
  int const ret = this->close ();
  // Record the timeout AFTER closing; the safeguard above keeps this
  // object alive across both calls.
  this->reset_state (TAO_LF_Event::LFS_TIMEOUT);

  if (TAO_debug_level > 9)
    {
      TAOLIB_DEBUG ((LM_DEBUG, "TAO (%P|%t) - TAO_IIOP_Connection_Handler[%d]::"
                     "handle_timeout reset state to LFS_TIMEOUT\n",
                     this->transport ()-> id()));
    }
  return ret;
}
示例13: sizeof
int
TAO_IIOP_Connection_Handler::close_connection (void)
{
// To maintain maximum compatibility, we only set this socket option
// if the user has provided a linger timeout.
int const linger = this->orb_core()->orb_params()->linger ();
if (linger != -1)
{
struct linger lval;
lval.l_onoff = 1;
lval.l_linger = (u_short)linger;
if (this->peer ().set_option(SOL_SOCKET,
SO_LINGER,
(void*) &lval,
sizeof (lval)) == -1)
{
if (TAO_debug_level)
{
TAOLIB_DEBUG ((LM_DEBUG,
ACE_TEXT ("TAO (%P|%t) Unable to set ")
ACE_TEXT ("SO_LINGER on %d\n"),
this->peer ().get_handle ()));
}
}
}
return this->close_connection_eh (this);
}
示例14: sizeof
int
TAO_SCIOP_Endpoint::set (const ACE_INET_Addr &addr,
int use_dotted_decimal_addresses)
{
char tmp_host[MAXHOSTNAMELEN + 1];
if (use_dotted_decimal_addresses
|| addr.get_host_name (tmp_host, sizeof (tmp_host)) != 0)
{
const char *tmp = addr.get_host_addr ();
if (tmp == 0)
{
if (TAO_debug_level > 0)
TAOLIB_DEBUG ((LM_DEBUG,
ACE_TEXT ("\n\nTAO (%P|%t) ")
ACE_TEXT ("SCIOP_Endpoint::set ")
ACE_TEXT ("- %p\n\n"),
ACE_TEXT ("cannot determine hostname")));
return -1;
}
else
this->host_ = tmp;
}
else
this->host_ = CORBA::string_dup (tmp_host);
this->port_ = addr.get_port_number();
return 0;
}
示例15: ACE_GUARD_RETURN
/// Enqueue @a request on the thread pool's queue and wake a worker.
/// Returns false (without queueing) when the task is not accepting
/// requests.
bool
TAO::CSD::TP_Task::add_request(TP_Request* request)
{
  ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->lock_, false);

  if (this->accepting_requests_)
    {
      // Let the request prepare itself (e.g. clone any underlying data
      // it needs) before it goes onto the queue, then queue it and
      // notify a waiting worker.
      request->prepare_for_queue();
      this->queue_.put(request);
      this->work_available_.signal();
      return true;
    }

  TAOLIB_DEBUG((LM_DEBUG,"(%P|%t) TP_Task::add_request() - "
                "not accepting requests\n"));
  return false;
}