本文整理汇总了C++中ReliSock类的典型用法代码示例。如果您正苦于以下问题:C++ ReliSock类的具体用法?C++ ReliSock怎么用?C++ ReliSock使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ReliSock类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: agent_starter
int
agent_starter( ReliSock * s )
{
	// Read the requested agent's subsystem name off the socket and
	// dispatch to the matching handler.  Returns TRUE on success,
	// FALSE on read failure or an unrecognized agent name.
	//
	// Note: the original code cast 's' to a second ReliSock* local;
	// 's' already has that type, so the cast was redundant and removed.
	char *subsys = NULL;
	s->decode();
	if( ! s->code(subsys) ||
		! s->end_of_message() ) {
		dprintf( D_ALWAYS, "Can't read subsystem name\n" );
		// code() may have allocated even on partial failure; free(NULL) is safe.
		free( subsys );
		return FALSE;
	}
	dprintf ( D_ALWAYS, "Starting agent '%s'\n", subsys );
	if( strcasecmp(subsys, "fetch_log") == 0 ) {
		free (subsys);
		return handle_agent_fetch_log( s );
	}
	// default: unknown agent name
	free (subsys);
	dprintf( D_ALWAYS, "WARNING: unrecognized agent name\n" );
	return FALSE;
}
示例2: do_item
bool do_item(Daemon* d, MyString name, int num, int output_mode) {
	// Query daemon 'd' with a DC_SEC_QUERY sub-command, read back the
	// authorization ClassAd, and print the result.  Returns true only
	// if the complete round-trip (connect, sub-command, reply) worked.
	CondorError errstack;
	ClassAd authz_ad;
	bool sc_success = false;
	ReliSock *sock = NULL;
	bool fn_success = false;
	sock = (ReliSock*) d->makeConnectedSocket( Stream::reli_sock, 0, 0, &errstack );
	if (sock) {
		sc_success = d->startSubCommand(DC_SEC_QUERY, num, sock, 0, &errstack);
		if (sc_success) {
			sock->decode();
			if (getClassAd(sock, authz_ad) &&
				sock->end_of_message()) {
				fn_success = true;
			}
		}
	}
	// print_info() tolerates a NULL sock (connection failure case).
	print_info(fn_success, d->addr(), sock, name, num, &authz_ad, &errstack, output_mode);
	// makeConnectedSocket() returns a heap-allocated socket we own;
	// the original leaked it on every call.  delete NULL is safe.
	delete sock;
	return fn_success;
}
示例3: do_item
bool do_item(Daemon* d, MyString name, int num, int output_mode) {
	// Query daemon 'd' with a DC_SEC_QUERY sub-command, read back the
	// authorization ClassAd, and print the result.  Returns true only
	// if the complete round-trip (connect, sub-command, reply) worked.
	CondorError errstack;
	ClassAd authz_ad;
	bool sc_success;
	ReliSock *sock = NULL;
	bool fn_success = false;
	sock = (ReliSock*) d->makeConnectedSocket( Stream::reli_sock, 0, 0, &errstack );
	if (sock) {
		sc_success = d->startSubCommand(DC_SEC_QUERY, num, sock, 0, &errstack);
		if (sc_success) {
			sock->decode();
			if (getClassAd(sock, authz_ad) &&
				sock->end_of_message()) {
				fn_success = true;
			}
		}
		print_info(fn_success, sock->get_connect_addr(), sock, name, num, &authz_ad, &errstack, output_mode);
		// makeConnectedSocket() returns a heap-allocated socket we own;
		// the original leaked it on every successful connection.
		delete sock;
	} else {
		// we know that d->addr() is not null because we checked before
		// calling do_item. but i'll be paranoid and check again.
		fprintf(stderr, "ERROR: failed to make connection to %s\n", d->addr()?d->addr():"(null)");
	}
	return fn_success;
}
示例4: send_cmd_to_startd
/*
Takes sinful address of startd and sends it the given cmd, along
with the capability and an end_of_message.
*/
int
send_cmd_to_startd(char *sin_host, char *capability, int cmd)
{
ReliSock* sock = NULL;
Daemon startd (DT_STARTD, sin_host, NULL);
if (!(sock = (ReliSock*)startd.startCommand(cmd, Stream::reli_sock, 20))) {
dprintf( D_ALWAYS, "Can't connect to startd at %s\n", sin_host );
return -1;
}
// send the capability
ClaimIdParser idp( capability );
dprintf(D_FULLDEBUG, "send capability %s\n", idp.publicClaimId() );
if(!sock->code(capability)){
dprintf( D_ALWAYS, "sock->code(%s) failed.\n", idp.publicClaimId() );
delete sock;
return -3;
}
// send end of message
if( !sock->end_of_message() ) {
dprintf( D_ALWAYS, "end_of_message failed\n" );
delete sock;
return -4;
}
dprintf( D_FULLDEBUG, "Sent command %d to startd at %s with cap %s\n",
cmd, sin_host, idp.publicClaimId() );
delete sock;
return 0;
}
示例5: command
// when a transferd registers itself, it identifies who it is. The connection
// is then held open and the schedd periodically might send more transfer
// requests to the transferd. Also, if the transferd dies, the schedd is
// informed quickly and reliably due to the closed connection.
bool
DCTransferD::setup_treq_channel(ReliSock **treq_sock_ptr,
	int timeout, CondorError *errstack)
{
	ReliSock *rsock;
	if (treq_sock_ptr != NULL) {
		// Our caller wants a pointer to the socket we used to succesfully
		// register the claim. The NULL pointer will represent failure and
		// this will only be set to something real if everything was ok.
		*treq_sock_ptr = NULL;
	}
	/////////////////////////////////////////////////////////////////////////
	// Connect to the transfer daemon
	/////////////////////////////////////////////////////////////////////////
	// This call will automatically connect to _addr, which was set in the
	// constructor of this object to be the transferd in question.
	rsock = (ReliSock*)startCommand(TRANSFERD_CONTROL_CHANNEL,
			Stream::reli_sock, timeout, errstack);
	if( ! rsock ) {
		dprintf( D_ALWAYS, "DCTransferD::setup_treq_channel: "
			"Failed to send command (TRANSFERD_CONTROL_CHANNEL) "
			"to the schedd\n" );
		// Guard against callers that pass a NULL errstack; the original
		// dereferenced it unconditionally.
		if (errstack) {
			errstack->push("DC_TRANSFERD", 1,
				"Failed to start a TRANSFERD_CONTROL_CHANNEL command.");
		}
		return false;
	}
	/////////////////////////////////////////////////////////////////////////
	// Make sure we are authenticated.
	/////////////////////////////////////////////////////////////////////////
	// First, if we're not already authenticated, force that now.
	if (!forceAuthentication( rsock, errstack )) {
		dprintf( D_ALWAYS, "DCTransferD::setup_treq_channel() authentication "
			"failure: %s\n",
			errstack ? errstack->getFullText().c_str() : "(no error stack)" );
		if (errstack) {
			errstack->push("DC_TRANSFERD", 1,
				"Failed to authenticate properly.");
		}
		// We own the socket returned by startCommand(); the original
		// leaked it on this failure path.
		delete rsock;
		return false;
	}
	rsock->encode();
	/////////////////////////////////////////////////////////////////////////
	// At this point, the socket passed all of the authentication protocols
	// so it is ready for use.
	/////////////////////////////////////////////////////////////////////////
	if (treq_sock_ptr)
		*treq_sock_ptr = rsock;
	return true;
}
示例6: if
int
ReliSock::accept( ReliSock &c )
{
	// Accept one pending connection on this listening socket into 'c'.
	// Returns TRUE on success, FALSE on timeout, bad state, or error.
	int c_sock;
	// Precondition: 'this' must be in the listening state and 'c' must be
	// a fresh, unused socket object.
	if (_state != sock_special || _special_state != relisock_listen ||
		c._state != sock_virgin)
	{
		return FALSE;
	}
	// If a timeout is configured, wait up to _timeout for a connection to
	// become available before calling accept(), so we never block forever.
	if (_timeout > 0) {
		Selector selector;
		selector.set_timeout( _timeout );
		selector.add_fd( _sock, Selector::IO_READ );
		selector.execute();
		if( selector.timed_out() ) {
			return FALSE;
		} else if ( !selector.has_ready() ) {
			dprintf( D_ALWAYS, "select returns %d, connect failed\n",
				selector.select_retval() );
			return FALSE;
		}
	}
#ifndef WIN32 /* Unix */
	// Clear errno so the EMFILE check below reflects condor_accept() only.
	errno = 0;
#endif
	if ((c_sock = condor_accept(_sock, c._who)) < 0) {
#ifndef WIN32 /* Unix */
		if ( errno == EMFILE ) {
			// Out of file descriptors is fatal for the process.
			_condor_fd_panic ( __LINE__, __FILE__ ); /* This calls dprintf_exit! */
		}
#endif
		return FALSE;
	}
	// Hand the accepted fd to 'c' and put it into decode (receive) mode.
	c.assign(c_sock);
	c.enter_connected_state("ACCEPT");
	c.decode();
	int on = 1;
	// SO_KEEPALIVE lets us eventually notice dead peers on idle connections.
	c.setsockopt(SOL_SOCKET, SO_KEEPALIVE, (char*)&on, sizeof(on));
	/* Set no delay to disable Nagle, since we buffer all our
	   relisock output and it degrades performance of our
	   various chatty protocols. -Todd T, 9/05
	*/
	c.setsockopt(IPPROTO_TCP, TCP_NODELAY, (char*)&on, sizeof(on));
	return TRUE;
}
示例7: HandleRequest
int TransferQueueManager::HandleRequest(int cmd,Stream *stream)
{
	// Command handler for TRANSFER_QUEUE_REQUEST: read the request
	// ClassAd off the socket, validate the required attributes, and
	// enqueue a TransferQueueRequest that takes ownership of the socket.
	// Returns KEEP_STREAM when the socket is now managed by the queue
	// (or was already closed), FALSE on a malformed request.
	ReliSock *sock = (ReliSock *)stream;
	ASSERT( cmd == TRANSFER_QUEUE_REQUEST );
	ClassAd msg;
	sock->decode();
	if( !getClassAd( sock, msg ) || !sock->end_of_message() ) {
		dprintf(D_ALWAYS,
			"TransferQueueManager: failed to receive transfer request "
			"from %s.\n", sock->peer_description() );
		return FALSE;
	}
	// All of these attributes are mandatory; reject the request if any
	// are missing or of the wrong type.
	bool downloading = false;
	MyString fname;
	MyString jobid;
	MyString queue_user;
	filesize_t sandbox_size;
	if( !msg.LookupBool(ATTR_DOWNLOADING,downloading) ||
		!msg.LookupString(ATTR_FILE_NAME,fname) ||
		!msg.LookupString(ATTR_JOB_ID,jobid) ||
		!msg.LookupString(ATTR_USER,queue_user) ||
		!msg.LookupInteger(ATTR_SANDBOX_SIZE,sandbox_size))
	{
		// Dump the offending ad so the bad request can be diagnosed.
		MyString msg_str;
		sPrintAd(msg_str, msg);
		dprintf(D_ALWAYS,"TransferQueueManager: invalid request from %s: %s\n",
			sock->peer_description(), msg_str.Value());
		return FALSE;
	}
	// Currently, we just create the client with the default max queue
	// age. If it becomes necessary to customize the maximum age
	// on a case-by-case basis, it should be easy to adjust.
	TransferQueueRequest *client =
		new TransferQueueRequest(
			sock,
			sandbox_size,
			fname.Value(),
			jobid.Value(),
			queue_user.Value(),
			downloading,
			m_default_max_queue_age);
	if( !AddRequest( client ) ) {
		delete client;
		return KEEP_STREAM; // we have already closed this socket
	}
	return KEEP_STREAM;
}
示例8: strdup
// Called when the schedd initially connects to the transferd to finish
// the registration process.
int
TransferD::setup_transfer_request_handler(int /*cmd*/, Stream *sock)
{
	// Authenticate the connecting schedd (if not already authenticated),
	// then register the socket with DaemonCore so subsequent transfer
	// requests arrive via accept_transfer_request_handler.  Returns
	// KEEP_STREAM to hold the channel open, CLOSE_STREAM on auth failure.
	ReliSock *rsock = (ReliSock*)sock;
	MyString sock_id;
	dprintf(D_ALWAYS, "Got TRANSFER_CONTROL_CHANNEL!\n");
	rsock->decode();
	///////////////////////////////////////////////////////////////
	// make sure we are authenticated
	///////////////////////////////////////////////////////////////
	if( ! rsock->triedAuthentication() ) {
		CondorError errstack;
		if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
			// we failed to authenticate, we should bail out now
			// since we don't know what user is trying to perform
			// this action.
			// TODO: it'd be nice to print out what failed, but we
			// need better error propagation for that...
			errstack.push( "TransferD::setup_transfer_request_handler()", 42,
				"Failure to register transferd - Authentication failed" );
			dprintf( D_ALWAYS, "setup_transfer_request_handler() "
				"aborting: %s\n",
				errstack.getFullText().c_str() );
			refuse(rsock);
			return CLOSE_STREAM;
		}
	}
	rsock->decode();
	///////////////////////////////////////////////////////////////
	// Register this socket with a socket handler to handle incoming requests
	///////////////////////////////////////////////////////////////
	sock_id += "<TreqChannel-Socket>";
	// NOTE(review): freeing _sock_id right after Register_Socket assumes
	// DaemonCore copies the id string — confirm against Register_Socket docs.
	char* _sock_id = strdup( sock_id.Value() );	//de-const
	// register the handler for any future transfer requests on this socket.
	daemonCore->Register_Socket((Sock*)rsock, _sock_id,
		(SocketHandlercpp)&TransferD::accept_transfer_request_handler,
		"TransferD::accept_transfer_request_handler", this, ALLOW);
	free( _sock_id );
	dprintf(D_ALWAYS, "Treq channel established.\n");
	dprintf(D_ALWAYS, "Accepting Transfer Requests.\n");
	return KEEP_STREAM;
}
示例9:
bool
DCStarter::createJobOwnerSecSession(int timeout,char const *job_claim_id,char const *starter_sec_session,char const *session_info,MyString &owner_claim_id,MyString &error_msg,MyString &starter_version,MyString &starter_addr)
{
	// Ask the starter to create a security session for the job owner.
	// On success the output parameters (owner_claim_id, starter_version,
	// starter_addr) are filled from the starter's reply; on failure,
	// error_msg describes what went wrong and false is returned.
	ReliSock sock;
	if (IsDebugLevel(D_COMMAND)) {
		dprintf (D_COMMAND, "DCStarter::createJobOwnerSecSession(%s,...) making connection to %s\n",
			getCommandStringSafe(CREATE_JOB_OWNER_SEC_SESSION), _addr ? _addr : "NULL");
	}
	if( !connectSock(&sock, timeout, NULL) ) {
		error_msg = "Failed to connect to starter";
		return false;
	}
	// Start the command inside the starter's existing security session.
	if( !startCommand(CREATE_JOB_OWNER_SEC_SESSION, &sock,timeout,NULL,NULL,false,starter_sec_session) ) {
		error_msg = "Failed to send CREATE_JOB_OWNER_SEC_SESSION to starter";
		return false;
	}
	// Send the request ad: which claim, plus extra session configuration.
	ClassAd input;
	input.Assign(ATTR_CLAIM_ID,job_claim_id);
	input.Assign(ATTR_SESSION_INFO,session_info);
	sock.encode();
	if( !putClassAd(&sock, input) || !sock.end_of_message() ) {
		error_msg = "Failed to compose CREATE_JOB_OWNER_SEC_SESSION to starter";
		return false;
	}
	// Flip the socket around and read the starter's reply ad.
	sock.decode();
	ClassAd reply;
	if( !getClassAd(&sock, reply) || !sock.end_of_message() ) {
		error_msg = "Failed to get response to CREATE_JOB_OWNER_SEC_SESSION from starter";
		return false;
	}
	// The starter reports success/failure via ATTR_RESULT in the reply.
	bool success = false;
	reply.LookupBool(ATTR_RESULT,success);
	if( !success ) {
		reply.LookupString(ATTR_ERROR_STRING,error_msg);
		return false;
	}
	reply.LookupString(ATTR_CLAIM_ID,owner_claim_id);
	reply.LookupString(ATTR_VERSION,starter_version);
	// get the full starter address from the starter in case it contains
	// extra CCB info that we don't already know about
	reply.LookupString(ATTR_STARTER_IP_ADDR,starter_addr);
	return true;
}
示例10: initialize
void
initialize()
{
	// Set up the Aviary locator plugin: create the provider, register its
	// listener socket with DaemonCore, and start the endpoint-pruning timer.
	char *tmp;
	string collName;
	dprintf(D_FULLDEBUG, "AviaryLocatorPlugin: Initializing...\n");
	tmp = param("COLLECTOR_NAME");
	if (NULL == tmp) {
		collName = getPoolName();
	} else {
		collName = tmp;
		free(tmp); tmp = NULL;
	}
	// NOTE(review): collName is computed above but never used below —
	// confirm whether it was meant to be passed to the provider factory.
	string log_name;
	formatstr(log_name,"aviary_locator.log");
	provider = AviaryProviderFactory::create(log_name, getPoolName(),CUSTOM,LOCATOR, "services/locator/");
	if (!provider) {
		EXCEPT("Unable to configure AviaryProvider. Exiting...");
	}
	ReliSock *sock = new ReliSock;
	if (!sock) {
		EXCEPT("Failed to allocate transport socket");
	}
	// Wrap the provider's listener fd in a ReliSock so DaemonCore can
	// service it.
	if (!sock->assign(provider->getListenerSocket())) {
		EXCEPT("Failed to bind transport socket");
	}
	int index;
	if (-1 == (index =
		daemonCore->Register_Socket((Stream *) sock,
			"Aviary Method Socket",
			(SocketHandlercpp) ( &AviaryLocatorPlugin::handleTransportSocket ),
			"Handler for Aviary Methods.", this))) {
		EXCEPT("Failed to register transport socket");
	}
	// Periodic pruning of unresponsive endpoints; the two interval
	// arguments presumably map to initial delay and recurring period —
	// confirm against DaemonCore::Register_Timer.
	int pruning_interval = param_integer("AVIARY_LOCATOR_PRUNE_INTERVAL",20);
	if (-1 == (index = daemonCore->Register_Timer(
		pruning_interval,pruning_interval*2,
		(TimerHandlercpp)(&AviaryLocatorPlugin::handleTimerCallback),
		"Timer for pruning unresponsive endpoints", this))) {
		EXCEPT("Failed to register pruning timer");
	}
}
示例11: handle_request
int IOProxyHandler::handle_request( Stream *s )
{
	ReliSock *sock = (ReliSock *) s;
	char buffer[CHIRP_LINE_MAX];

	// A non-positive result from get_line_raw means the peer went away:
	// log it, tear ourselves down, and tell DaemonCore to drop the stream.
	if( sock->get_line_raw(buffer,CHIRP_LINE_MAX) <= 0 ) {
		dprintf(D_FULLDEBUG,"IOProxyHandler: closing connection to %s\n",sock->peer_ip_str());
		delete this;
		return ~KEEP_STREAM;
	}

	// Until the client has presented its cookie, every incoming line is
	// handled as part of the cookie handshake rather than a real request.
	if( got_cookie ) {
		handle_standard_request(sock,buffer);
	} else {
		handle_cookie_request(sock,buffer);
	}
	return KEEP_STREAM;
}
示例12: initialize
void
initialize()
{
	// Set up the Aviary collector plugin: create the provider, attach its
	// listener socket to DaemonCore, and publish the collector's address.
	char *tmp;
	string collName;
	dprintf(D_FULLDEBUG, "AviaryCollectorPlugin: Initializing...\n");
	tmp = param("COLLECTOR_NAME");
	if (NULL == tmp) {
		collName = getPoolName();
	} else {
		collName = tmp;
		free(tmp); tmp = NULL;
	}
	// NOTE(review): collName is computed above but never used below —
	// confirm whether the provider id was meant to incorporate it.
	string log_name("aviary_collector.log");
	string id_name("collector"); id_name+=SEPARATOR; id_name+=getPoolName();
	provider = AviaryProviderFactory::create(log_name, id_name,"COLLECTOR","POOL","services/collector/");
	if (!provider) {
		EXCEPT("Unable to configure AviaryProvider. Exiting...");
	}
	collector = CollectorObject::getInstance();
	ReliSock *sock = new ReliSock;
	if (!sock) {
		EXCEPT("Failed to allocate transport socket");
	}
	// Wrap the provider's listener fd so DaemonCore can service it.
	if (!sock->assign(provider->getListenerSocket())) {
		EXCEPT("Failed to bind transport socket");
	}
	int index;
	if (-1 == (index =
		daemonCore->Register_Socket((Stream *) sock,
			"Aviary Method Socket",
			(SocketHandlercpp) ( &AviaryCollectorPlugin::handleTransportSocket ),
			"Handler for Aviary Methods.", this))) {
		EXCEPT("Failed to register transport socket");
	}
	collector->setMyAddress(daemonCore->publicNetworkIpAddr());
}
示例13: log_name
void
AviaryScheddPlugin::earlyInitialize()
{
	// Create the Aviary job provider and register its listener socket
	// with DaemonCore.  Runs at most once (see the static guard below).
	// Since this plugin is registered with multiple
	// PluginManagers it may be initialized more than once,
	// and we don't want that
	static bool skip = false;
	if (skip) return; skip = true;
	string log_name("aviary_job.log");
	string id_name("job"); id_name+=SEPARATOR; id_name+=getScheddName();
	provider = AviaryProviderFactory::create(log_name,id_name,
		"SCHEDULER","JOB","services/job/");
	if (!provider) {
		EXCEPT("Unable to configure AviaryProvider. Exiting...");
	}
	schedulerObj = SchedulerObject::getInstance();
	dirtyJobs = new DirtyJobsType();
	isHandlerRegistered = false;
	ReliSock *sock = new ReliSock;
	if (!sock) {
		EXCEPT("Failed to allocate transport socket");
	}
	// Wrap the provider's listener fd so DaemonCore can service it.
	if (!sock->assign(provider->getListenerSocket())) {
		EXCEPT("Failed to bind transport socket");
	}
	int index;
	if (-1 == (index =
		daemonCore->Register_Socket((Stream *) sock,
			"Aviary Method Socket",
			(SocketHandlercpp) ( &AviaryScheddPlugin::handleTransportSocket ),
			"Handler for Aviary Methods.",
			this))) {
		EXCEPT("Failed to register transport socket");
	}
	// Not fully initialized yet — m_initialized is cleared here and
	// presumably set by a later initialization step; confirm with caller.
	m_initialized = false;
}
示例14: Sock
// Copy constructor: duplicate the base Sock state, then clone the
// ReliSock-specific (cedar) state by round-tripping it through the
// serialize()/deserialize pair.
ReliSock::ReliSock(const ReliSock & orig) : Sock(orig)
{
	init();
	// now copy all cedar state info via the serialize() method
	char *buf = NULL;
	buf = orig.serialize();	// get state from orig sock
	ASSERT(buf);
	serialize(buf);	// put the state into the new sock
	// buf is owned by us; delete[] here presumes serialize() allocates
	// with new[] — confirm against ReliSock::serialize().
	delete [] buf;
}
示例15:
SharedPortState::HandlerResult
SharedPortState::HandleHeader(Stream *&s)
{
	// First tell the target daemon that we are about to send the fd.
	ReliSock *sock = static_cast<ReliSock*>(s);
	sock->encode();
	if( !sock->put((int)SHARED_PORT_PASS_SOCK) ||
		!sock->end_of_message() )
	{
		// NOTE(review): the command sent is SHARED_PORT_PASS_SOCK but the
		// log text says SHARED_PORT_PASS_FD — confirm which name is current.
		dprintf(D_ALWAYS,"SharedPortClient: failed to send SHARED_PORT_PASS_FD to %s%s: %s\n",
			m_sock_name.c_str(),
			m_requested_by.c_str(),
			strerror(errno));
		return FAILED;
	}
	// Header accepted; advance the state machine to the fd-passing step.
	m_state = SEND_FD;
	return CONTINUE;
}