This article compiles typical usage examples of the C++ UdpServer class. If you have been wondering what UdpServer is for in C++, how to use it, or where to find concrete examples, the hand-picked class code samples below may help.
The following presents 15 code examples of the UdpServer class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.
Example 1: sendReply
// this must always be called sometime AFTER handleRequest() is called
void sendReply ( UdpSlot *slot , Msg39 *msg39 , char *reply , long replyLen ,
long replyMaxSize , bool hadError ) {
// debug msg
if ( g_conf.m_logDebugQuery || (msg39&&msg39->m_debug) )
logf(LOG_DEBUG,"query: msg39: [%lu] Sending reply len=%li.",
(long)msg39,replyLen);
// sanity
if ( hadError && ! g_errno ) { char *xx=NULL;*xx=0; }
// no longer in use. msg39 will be NULL if ENOMEM or something
if ( msg39 ) msg39->m_inUse = false;
// . if we enter from a local call and not from handling a udp slot
// then execute this logic here to return control to caller.
// . do not delete ourselves because we will be re-used probably and
// caller handles that now.
if ( msg39 && msg39->m_callback ) {
// if we blocked call user callback
if ( msg39->m_blocked ) msg39->m_callback ( msg39->m_state );
// if not sending back a udp reply, return now
return;
}
// . now we can free the lists before sending
// . may help a little bit...
//if ( msg39 ) {
// for ( long j = 0 ; j < msg39->m_msg2.m_numLists ; j++ )
// msg39->m_lists[j].freeList();
//}
// get the appropriate UdpServer for this niceness level
UdpServer *us = &g_udpServer;
// i guess clear this
long err = g_errno;
g_errno = 0;
// send an error reply if g_errno is set
if ( err ) us->sendErrorReply ( slot , err ) ;
else us->sendReply_ass ( reply ,
replyLen ,
reply ,
replyMaxSize ,
slot );
// always delete ourselves when done handling the request
if ( msg39 ) {
mdelete ( msg39 , sizeof(Msg39) , "Msg39" );
delete (msg39);
}
}
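A note on the char *xx=NULL;*xx=0; idiom seen in this example and in several of the later ones: it deliberately writes through a null pointer so the process crashes right at the failed sanity check, serving as a hard assert in this codebase.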
Example 2: UdpServerForIpWorker
UINT __stdcall UdpServerForIpWorker(PVOID context)
{
UdpServer server;
if (server.Init("127.0.0.1", (int)context)) {
std::vector<UCHAR> buffer;
buffer.resize(0x1000);
for (;;) {
sockaddr_in addr;
int recv_length = server.Recv((char *)buffer.data(), buffer.size(), &addr);
if (recv_length == -1) {
break;
}
server.Send("OK", 3, &addr);
}
}
return 0;
}
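The worker above receives its UDP port through the thread-creation context pointer, so a natural way to launch it on Windows is _beginthreadex, whose thread-routine signature matches the UINT __stdcall (PVOID) declaration. The launcher below is only a minimal sketch; the port value and the choice to wait for the thread are assumptions, not part of the original example.

#include <windows.h>
#include <process.h>

UINT __stdcall UdpServerForIpWorker(PVOID context); // defined above

int main()
{
    const int port = 9000; // hypothetical local port; the worker binds 127.0.0.1:<port>
    // the port travels through the context pointer, exactly as the worker expects
    HANDLE h = (HANDLE)_beginthreadex(NULL, 0, UdpServerForIpWorker,
                                      (PVOID)(INT_PTR)port, 0, NULL);
    if (h != NULL) {
        WaitForSingleObject(h, INFINITE); // block until the worker loop exits
        CloseHandle(h);
    }
    return 0;
}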
Example 3: addedList
// g_errno may be set when this is called
void addedList ( UdpSlot *slot , Rdb *rdb ) {
// no memory means to try again
if ( g_errno == ENOMEM ) g_errno = ETRYAGAIN;
// doing a full rebuild will add collections
if ( g_errno == ENOCOLLREC &&
g_repairMode > 0 )
//g_repair.m_fullRebuild )
g_errno = ETRYAGAIN;
// . if we got a ETRYAGAIN cuz the buffer we add to was full
// then we should sleep and try again!
// . return false cuz this blocks for a period of time
// before trying again
// . but now to free the udp slot when we are doing an urgent merge
// let's send an error back!
//if ( g_errno == ETRYAGAIN ) {
// debug msg
//log("REGISTERING SLEEP CALLBACK");
// try adding again in 1 second
// g_loop.registerSleepCallback ( 1000, slot, tryAgainWrapper );
// return now
// return;
//}
// random test
//if ( (rand() % 10) == 1 ) g_errno = ETRYAGAIN;
//long niceness = slot->getNiceness() ;
// select udp server based on niceness
UdpServer *us = &g_udpServer ;
//if ( niceness == 0 ) us = &g_udpServer2;
//else us = &g_udpServer ;
// chalk it up
rdb->sentReplyAdd ( 0 );
// are we done
if ( ! g_errno ) {
// . send an empty (non-error) reply as verification
// . slot should be auto-nuked on transmission/timeout of reply
// . udpServer should free the readBuf
us->sendReply_ass ( NULL , 0 , NULL , 0 , slot ) ;
return;
}
// on other errors just send the err code back
us->sendErrorReply ( slot , g_errno );
}
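Note the reply convention this handler shares with the others in this collection: success is acknowledged with an empty reply via sendReply_ass(NULL, 0, NULL, 0, slot), while any failure goes back through sendErrorReply with the error code. Transient conditions (ENOMEM, or ENOCOLLREC while a rebuild is adding collections) are first remapped to ETRYAGAIN so the sender retries instead of treating them as fatal.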
Example 4: startThread
void* UdpServer::startThread(void *obj)
{
UdpServer *instance = reinterpret_cast<UdpServer *>(obj);
socklen_t londesde;
londesde = sizeof(sockaddr_in);
while(true)
{
int size;
londesde = sizeof(sockaddr_in);
(instance->getUDPRespSock())->sin_port = htons (instance->getUDPPort());
//cerr<<"UDPSERV: listening"<<endl;
if ( (size = recvfrom(instance->getUDPSocket(), instance->getUDPBuffer(), MAXMSGSIZE, 0, (sockaddr *)instance->getUDPRespSock(), &londesde)) < 0 ) {
cerr<<"UDPSERV: Error calling recvfrom()"<<endl;
return NULL;
}
if(size>0)
instance->UDPmanageRecv(size);
}
}
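Since startThread receives the UdpServer instance through its void* argument and only touches it through that pointer, it reads like a static member meant to be a pthread entry point. Under that assumption, a minimal sketch of wiring it up might look like the following; the launch helper itself is illustrative and not part of the class.

#include <pthread.h>

void launchUdpReceiveLoop(UdpServer &server)
{
    pthread_t tid;
    // startThread casts its argument back to UdpServer* and loops on recvfrom()
    if (pthread_create(&tid, NULL, &UdpServer::startThread, &server) == 0) {
        pthread_detach(tid); // the receive loop runs for the lifetime of the server
    }
}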
Example 5: handleRequest5d
// . did we receive a checkoff request from a fellow twin?
// . request is a list of checkoff request keys ("a" keys)
void handleRequest5d ( UdpSlot *slot , long netnice ) {
// get the sending hostid
long sid = slot->m_hostId;
// sanity check
if ( sid < 0 ) { char *xx=NULL; *xx=0; }
// get the request buffer
//key128_t *keys = (key128_t *)slot->m_readBuf;
long nk = slot->m_readBufSize / 16;
// shortcut
UdpServer *us = &g_udpServer;
// if tree gets full, then return false forever
if ( ! g_syncdb.m_qt.hasRoomForKeys ( nk ) ) {
us->sendErrorReply ( slot , ETRYAGAIN );
return;
}
for ( long i = 0 ; i < nk ; i++ ) {
// get the key
key128_t k = g_syncdb.m_keys[i];
// sanity check. must be a negative key.
if ( (k.n0 & 0x1) != 0x0 ) { char *xx=NULL;*xx=0; }
// get the anti key. the "need to recv checkoff request"
// key which is the positive
key128_t pk = k;
// make it positive
pk.n0 |= 0x01;
// is it in there?
long nn = g_syncdb.m_qt.getNode ( 0 , (char *)&pk );
// if yes, nuke it. they annihilate.
if ( nn >= 0 ) {
g_syncdb.m_qt.deleteNode ( nn , true );
continue;
}
// . otherwise, add right to the tree
// . should always succeed!
if ( g_syncdb.m_qt.addKey(&k)<0) { char *xx=NULL;*xx=0; }
}
// return empty reply to mean success
us->sendReply_ass ( NULL , 0 , NULL , 0 , slot );
}
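The annihilation step above hinges on the low bit of the key telling a negative (delete) key apart from its positive counterpart. The stand-alone snippet below only illustrates that pairing; key128_t here is a simplified stand-in, not the real syncdb key type.

#include <cstdint>
#include <cstdio>

struct key128_t { uint64_t n1; uint64_t n0; }; // stand-in for illustration

int main()
{
    key128_t neg;
    neg.n1 = 0x1234;
    neg.n0 = 0x5678 & ~uint64_t(1); // low bit clear: a "negative" checkoff key
    key128_t pos = neg;
    pos.n0 |= 0x01;                 // low bit set: the matching positive key
    printf("negative n0=0x%llx positive n0=0x%llx\n",
           (unsigned long long)neg.n0, (unsigned long long)pos.n0);
    return 0;
}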
Example 6:
void * UdpServer::OperatorThread(void * pParam)
{
if(!pParam)
{
return 0;
}
UdpServer * pThis = (UdpServer*)pParam;
pthread_mutex_lock(&pThis->mMutex);
MsgInfo data;
if(pThis->mDataList.size() > 0)
{
data = pThis->mDataList.front();
pThis->mDataList.pop_front();
if(pThis->mOperaFunc)
{
pThis->mOperaFunc(data.Content, data.Fd, data.Time);
}
pThis->OnReceiveMessage(data.Content, data.Fd, data.Time);
}
pthread_mutex_unlock(&pThis->mMutex);
return 0;
}
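OperatorThread is the consumer half of a simple producer/consumer queue: it pops at most one MsgInfo from mDataList under mMutex and hands it to mOperaFunc and OnReceiveMessage. The producer half appears in Example 10 below, where AcceptThread pushes received data onto the same list under the same mutex.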
Example 7: ask
//......... part of the code omitted here .........
// no, not anymore, we commented out that request peeking code
char *p = m_request;
*(int64_t *) p = syncPoint ; p += 8;
//*(key_t *) p = m_startKey ; p += sizeof(key_t);
//*(key_t *) p = m_endKey ; p += sizeof(key_t);
*(int32_t *) p = m_minRecSizes ; p += 4;
*(int32_t *) p = startFileNum ; p += 4;
*(int32_t *) p = numFiles ; p += 4;
*(int32_t *) p = maxCacheAge ; p += 4;
if ( p - m_request != RDBIDOFFSET ) { char *xx=NULL;*xx=0; }
*p = m_rdbId ; p++;
*p = addToCache ; p++;
*p = doErrorCorrection; p++;
*p = includeTree ; p++;
*p = (char)niceness ; p++;
*p = (char)m_allowPageCache; p++;
KEYSET(p,m_startKey,m_ks); ; p+=m_ks;
KEYSET(p,m_endKey,m_ks); ; p+=m_ks;
// NULL terminated collection name
//strcpy ( p , coll ); p += gbstrlen ( coll ); *p++ = '\0';
*(collnum_t *)p = m_collnum; p += sizeof(collnum_t);
m_requestSize = p - m_request;
// ask an individual host for this list if hostId is NOT -1
if ( m_hostId != -1 ) {
// get Host
Host *h = g_hostdb.getHost ( m_hostId );
if ( ! h ) {
g_errno = EBADHOSTID;
log(LOG_LOGIC,"net: msg0: Bad hostId of %"INT64".",
m_hostId);
return true;
}
// if niceness is 0, use the higher priority udpServer
UdpServer *us ;
uint16_t port;
QUICKPOLL(m_niceness);
//if ( niceness <= 0 || netnice == 0 ) {
//if ( realtime ) {
// us = &g_udpServer2; port = h->m_port2; }
//else {
us = &g_udpServer ; port = h->m_port ;
// . returns false on error and sets g_errno, true otherwise
// . calls callback when reply is received (or error)
// . we return true if it returns false
if ( ! us->sendRequest ( m_request ,
m_requestSize ,
0x00 , // msgType
h->m_ip ,
port ,
m_hostId ,
NULL , // the slotPtr
this ,
gotSingleReplyWrapper ,
timeout ,
-1 , // backoff
-1 , // maxwait
replyBuf ,
replyBufMaxSize ,
m_niceness ) ) // cback niceness
return true;
// return false cuz it blocked
return false;
}
// timing debug
if ( g_conf.m_logTimingNet )
m_startTime = gettimeofdayInMilliseconds();
Example 8: handleRequest0
// . reply to a request for an RdbList
// . MUST call g_udpServer::sendReply or sendErrorReply() so slot can
// be destroyed
void handleRequest0 ( UdpSlot *slot , int32_t netnice ) {
// if niceness is 0, use the higher priority udpServer
UdpServer *us = &g_udpServer;
//if ( netnice == 0 ) us = &g_udpServer2;
// get the request
char *request = slot->m_readBuf;
int32_t requestSize = slot->m_readBufSize;
// collection is now stored in the request, so i commented this out
//if ( requestSize != MSG0_REQ_SIZE ) {
// log("net: Received bad data request size of %"INT32" bytes. "
// "Should be %"INT32".", requestSize ,(int32_t)MSG0_REQ_SIZE);
// us->sendErrorReply ( slot , EBADREQUESTSIZE );
// return;
//}
// parse the request
char *p = request;
int64_t syncPoint = *(int64_t *)p ; p += 8;
//key_t startKey = *(key_t *)p ; p += sizeof(key_t);
//key_t endKey = *(key_t *)p ; p += sizeof(key_t);
int32_t minRecSizes = *(int32_t *)p ; p += 4;
int32_t startFileNum = *(int32_t *)p ; p += 4;
int32_t numFiles = *(int32_t *)p ; p += 4;
int32_t maxCacheAge = *(int32_t *)p ; p += 4;
char rdbId = *p++;
char addToCache = *p++;
char doErrorCorrection = *p++;
char includeTree = *p++;
// this was messing up our niceness conversion logic
int32_t niceness = slot->m_niceness;//(int32_t)(*p++);
// still need to skip it though!
p++;
bool allowPageCache = (bool)(*p++);
char ks = getKeySizeFromRdbId ( rdbId );
char *startKey = p; p+=ks;
char *endKey = p; p+=ks;
// then null terminated collection
//char *coll = p;
collnum_t collnum = *(collnum_t *)p; p += sizeof(collnum_t);
CollectionRec *xcr = g_collectiondb.getRec ( collnum );
if ( ! xcr ) g_errno = ENOCOLLREC;
// error set from XmlDoc::cacheTermLists()?
if ( g_errno ) {
us->sendErrorReply ( slot , EBADRDBID ); return;}
// is this being called from callWaitingHandlers()
//bool isRecall = (netnice == 99);
// . get the rdb we need to get the RdbList from
// . returns NULL and sets g_errno on error
//Msg0 msg0;
//Rdb *rdb = msg0.getRdb ( rdbId );
Rdb *rdb = getRdbFromId ( rdbId );
if ( ! rdb ) {
us->sendErrorReply ( slot , EBADRDBID ); return;}
// keep track of stats
rdb->readRequestGet ( requestSize );
/*
// keep track of stats
if ( ! isRecall ) rdb->readRequestGet ( requestSize );
int64_t singleDocId2 = 0LL;
if ( rdbId == RDB_POSDB && maxCacheAge ) {
int64_t d1 = g_posdb.getDocId(startKey);
int64_t d2 = g_posdb.getDocId(endKey);
if ( d1+1 == d2 ) singleDocId2 = d1;
}
// have we parsed this docid and cached its termlists?
bool shouldBeCached2 = false;
if ( singleDocId2 &&
isDocIdInTermListCache ( singleDocId2 , coll ) )
shouldBeCached2 = true;
// if in the termlist cache, send it back right away
char *trec;
int32_t trecSize;
if ( singleDocId2 &&
getRecFromTermListCache(coll,
startKey,
endKey,
maxCacheAge,
&trec,
&trecSize) ) {
// if in cache send it back!
us->sendReply_ass(trec,trecSize,trec,trecSize,slot);
return;
}
// if should be cached but was not found then it's probably a
// synonym form not in the doc content. make an empty list then.
if ( shouldBeCached2 ) {
// send back an empty termlist
us->sendReply_ass(NULL,0,NULL,0,slot);
//......... part of the code omitted here .........
Example 9: gotListWrapper
// . slot should be auto-nuked upon transmission or error
// . TODO: ensure if this sendReply() fails does it really nuke the slot?
void gotListWrapper ( void *state , RdbList *listb , Msg5 *msg5xx ) {
// get the state
State00 *st0 = (State00 *)state;
// extract the udp slot and list and msg5
UdpSlot *slot = st0->m_slot;
RdbList *list = &st0->m_list;
Msg5 *msg5 = &st0->m_msg5;
UdpServer *us = st0->m_us;
// sanity check -- ensure they match
//if ( niceness != st0->m_niceness )
// log("Msg0: niceness mismatch");
// debug msg
//if ( niceness != 0 )
// log("HEY! niceness is not 0");
// timing debug
if ( g_conf.m_logTimingNet || g_conf.m_logDebugNet ) {
//log("Msg0:hndled request %"UINT64"",gettimeofdayInMilliseconds());
int32_t size = -1;
if ( list ) size = list->getListSize();
log(LOG_TIMING|LOG_DEBUG,
"net: msg0: Handled request for data. "
"Now sending data termId=%"UINT64" size=%"INT32""
" transId=%"INT32" ip=%s port=%i took=%"INT64" "
"(niceness=%"INT32").",
g_posdb.getTermId(msg5->m_startKey),
size,slot->m_transId,
iptoa(slot->m_ip),slot->m_port,
gettimeofdayInMilliseconds() - st0->m_startTime ,
st0->m_niceness );
}
// debug
//if ( ! msg5->m_includeTree )
// log("hotit\n");
// on error nuke the list and its data
if ( g_errno ) {
mdelete ( st0 , sizeof(State00) , "Msg0" );
delete (st0);
// TODO: free "slot" if this send fails
us->sendErrorReply ( slot , g_errno );
return;
}
QUICKPOLL(st0->m_niceness);
// point to the serialized list in "list"
char *data = list->getList();
int32_t dataSize = list->getListSize();
char *alloc = list->getAlloc();
int32_t allocSize = list->getAllocSize();
// tell list not to free the data since it is a reply so UdpServer
// will free it when it destroys the slot
list->setOwnData ( false );
// keep track of stats
Rdb *rdb = getRdbFromId ( st0->m_rdbId );
if ( rdb ) rdb->sentReplyGet ( dataSize );
// TODO: can we free any memory here???
// keep track of how long it takes to complete the send
st0->m_startTime = gettimeofdayInMilliseconds();
// debug point
int32_t oldSize = msg5->m_minRecSizes;
int32_t newSize = msg5->m_minRecSizes + 20;
// watch for wrap around
if ( newSize < oldSize ) newSize = 0x7fffffff;
if ( dataSize > newSize && list->getFixedDataSize() == 0 &&
// do not annoy me with these linkdb msgs
dataSize > newSize+100 )
log(LOG_LOGIC,"net: msg0: Sending more data than what was "
"requested. Inefficient. Bad engineer. dataSize=%"INT32" "
"minRecSizes=%"INT32".",dataSize,oldSize);
/*
// always compress these lists
if ( st0->m_rdbId == RDB_SECTIONDB ) { // && 1 == 3) {
// get sh48, the sitehash
key128_t *startKey = (key128_t *)msg5->m_startKey ;
int64_t sh48 = g_datedb.getTermId(startKey);
// debug
//log("msg0: got sectiondblist from disk listsize=%"INT32"",
// list->getListSize());
if ( dataSize > 50000 )
log("msg0: sending back list rdb=%"INT32" "
"listsize=%"INT32" sh48=0x%"XINT64"",
(int32_t)st0->m_rdbId,
dataSize,
sh48);
// save it
int32_t origDataSize = dataSize;
// store compressed list on itself
char *dst = list->m_list;
// warn if niceness is 0!
if ( st0->m_niceness == 0 )
log("msg0: compressing sectiondb list at niceness 0!");
// compress the list
uint32_t lastVoteHash32 = 0LL;
SectionVote *lastVote = NULL;
//......... part of the code omitted here .........
Example 10: if
// thread that accepts socket connections
void * UdpServer::AcceptThread(void * pParam)
{
if(0 == pParam)
{
cout << "param is null." << endl;
return 0;
}
UdpServer * pThis = (UdpServer*)pParam;
int maxFd = 0;
struct sockaddr_in cliAddr;
while(true)
{
FD_ZERO(&pThis->mReadFd);
FD_SET(pThis->mServerSocket, &pThis->mReadFd);
maxFd = (pThis->mServerSocket > maxFd)? pThis->mServerSocket : maxFd;
for(list<int>::iterator itor = pThis->mCliList.begin(); itor != pThis->mCliList.end(); itor++)
{
FD_SET(*itor, &pThis->mReadFd);
maxFd = (*itor > maxFd)? *itor : maxFd;
}
int res = select(maxFd + 1, &pThis->mReadFd, 0, 0, NULL);
if(-1 == res)
{
cout << "select failed." << endl;
}
if(-1 != pThis->mServerSocket && FD_ISSET(pThis->mServerSocket , &pThis->mReadFd))
{
int sin_size = sizeof(struct sockaddr_in);
int clientSoc = accept(pThis->mServerSocket, (struct sockaddr *)(&cliAddr), (socklen_t*)&sin_size);
if(-1 == clientSoc)
{
cout << "accept error." << endl;
continue;
}
if(DEBUG) cout << "Accept new client : " << clientSoc << endl;
pThis->mCliList.push_front(clientSoc);
string ip = inet_ntoa(cliAddr.sin_addr);
char tmp[16] = {0};
sprintf(tmp, "%d", ntohs(cliAddr.sin_port));
string port = tmp;
if(0 != pThis->mAcceptFunc)
{
pThis->mAcceptFunc(ip, port, clientSoc);
}
pThis->OnClientConnect(ip, port, clientSoc);
}
for(list<int>::iterator itor = pThis->mCliList.begin(); itor != pThis->mCliList.end(); itor++)
{
if(-1 != *itor && FD_ISSET(*itor , &pThis->mReadFd))
{
char buf[MAX_MESSAGE_LEN] = {0};
int res = recv(*itor, buf, sizeof(buf), 0);
if(0 < res)
{
if(DEBUG) cout << "Receive data : " << buf << endl;
pthread_mutex_lock(&pThis->mMutex);
pThis->mDataList.push_back(MsgInfo(*itor, buf));
pthread_mutex_unlock(&pThis->mMutex);
}
else if(0 == res)
{
cout << "client quit." << endl;
// grab the fd first: list::remove invalidates the iterator, so notify
// the callbacks, drop the entry, then rescan on the next pass of the
// outer while loop
int fd = *itor;
if(0 != pThis->mDisconnectFunc)
{
pThis->mDisconnectFunc(fd);
}
pThis->OnClientDisconnect(fd);
pthread_mutex_lock(&pThis->mMutex);
pThis->mCliList.remove(fd);
pthread_mutex_unlock(&pThis->mMutex);
break;
}
else
{
cout << "receive failed." << endl;
}
}
}
}
}
Example 11: void(
//......... part of the code omitted here .........
// if docid based assume it was a query reindex and keep it short!
// otherwise we end up waiting 120 seconds for a query reindex to
// go through on a docid we just spidered. TODO: use m_urlIsDocId
// MDW: check this out
if ( url && is_digit(url[0]) ) ct = 2;
// . this seems to be messing us up and preventing us from adding new
// requests into doledb when only spidering a few IPs.
// . make it random in the case of twin contention
ct = rand() % 10;
// . check our cache to avoid repetitive asking
// . use -1 for maxAge to indicate no max age
// . returns -1 if not in cache
// . use maxage of two minutes, 120 seconds
int32_t lockTime ;
lockTime = g_spiderLoop.m_lockCache.getLong(0,m_lockKeyUh48,ct,true);
// if it was in the cache and less than 2 minutes old then return
// true now with m_hasLock set to false.
if ( lockTime >= 0 ) {
if ( g_conf.m_logDebugSpider )
logf(LOG_DEBUG,"spider: cached missed lock for %s "
"lockkey=%" PRIu64, m_url,m_lockKeyUh48);
return true;
}
if ( g_conf.m_logDebugSpider )
logf(LOG_DEBUG,"spider: sending lock request for %s "
"lockkey=%" PRIu64, m_url,m_lockKeyUh48);
// now the locking group is based on the probable docid
//m_lockGroupId = g_hostdb.getGroupIdFromDocId(m_lockKey);
// ptr to list of hosts in the group
//Host *hosts = g_hostdb.getGroup ( m_lockGroupId );
// the same group (shard) that has the spiderRequest/Reply is
// the one responsible for locking.
Host *hosts = g_hostdb.getMyShard();
// shortcut
UdpServer *us = &g_udpServer;
static int32_t s_lockSequence = 0;
// remember the lock sequence # in case we have to call remove locks
m_lockSequence = s_lockSequence++;
LockRequest *lr = &m_lockRequest;
lr->m_lockKeyUh48 = m_lockKeyUh48;
lr->m_firstIp = m_firstIp;
lr->m_removeLock = 0;
lr->m_lockSequence = m_lockSequence;
lr->m_collnum = collnum;
// reset counts
m_numRequests = 0;
m_numReplies = 0;
// point to start of the 12 byte request buffer
char *request = (char *)lr;//m_lockKey;
int32_t requestSize = sizeof(LockRequest);//12;
// loop over hosts in that shard
for ( int32_t i = 0 ; i < hpg ; i++ ) {
// get a host
Host *h = &hosts[i];
// skip if dead! no need to get a reply from dead guys
if ( g_hostdb.isDead (h) ) continue;
// note it
if ( g_conf.m_logDebugSpider )
logf(LOG_DEBUG,"spider: sent lock "
"request #%" PRId32" for lockkey=%" PRIu64" %s to "
"hid=%" PRId32,m_numRequests,m_lockKeyUh48,
m_url,h->m_hostId);
// send request to him
if ( ! us->sendRequest ( request ,
requestSize ,
0x12 , // msgType
h->m_ip ,
h->m_port ,
h->m_hostId ,
NULL , // retSlotPtrPtr
this , // state data
gotLockReplyWrapper ,
udpserver_sendrequest_infinite_timeout ) )
// udpserver returns false and sets g_errno on error
return true;
// count them
m_numRequests++;
}
// block?
if ( m_numRequests > 0 ) return false;
// i guess nothing... hmmm... all dead?
//char *xx=NULL; *xx=0;
// m_hasLock should be false... all lock hosts seem dead... wait
if ( g_conf.m_logDebugSpider )
logf(LOG_DEBUG,"spider: all lock hosts seem dead for %s "
"lockkey=%" PRIu64, m_url,m_lockKeyUh48);
return true;
}
Example 12: logf
bool Msg12::removeAllLocks ( ) {
// ensure not in use. no msg12 replies outstanding.
if ( m_numRequests != m_numReplies ) { char *xx=NULL;*xx=0; }
// no longer use this
char *xx=NULL;*xx=0;
// skip if injecting
//if ( m_sreq->m_isInjecting ) return true;
if ( g_conf.m_logDebugSpider )
logf(LOG_DEBUG,"spider: removing all locks for %s %" PRIu64,
m_url,m_lockKeyUh48);
// we are now removing
m_removing = true;
LockRequest *lr = &m_lockRequest;
lr->m_lockKeyUh48 = m_lockKeyUh48;
lr->m_lockSequence = m_lockSequence;
lr->m_firstIp = m_firstIp;
lr->m_removeLock = 1;
// reset counts
m_numRequests = 0;
m_numReplies = 0;
// make that the request
// . point to start of the 12 byte request buffer
// . m_lockSequence should still be valid
char *request = (char *)lr;//m_lockKey;
int32_t requestSize = sizeof(LockRequest);//12;
// now the locking group is based on the probable docid
//uint32_t groupId = g_hostdb.getGroupIdFromDocId(m_lockKeyUh48);
// ptr to list of hosts in the group
//Host *hosts = g_hostdb.getGroup ( groupId );
Host *hosts = g_hostdb.getMyShard();
// this must select the same group that is going to spider it!
// i.e. our group! because we check our local lock table to see
// if a doled url is locked before spidering it ourselves.
//Host *hosts = g_hostdb.getMyGroup();
// shortcut
UdpServer *us = &g_udpServer;
// set the hi bit though for this one
//m_lockKey |= 0x8000000000000000LL;
// get # of hosts in each mirror group
int32_t hpg = g_hostdb.getNumHostsPerShard();
// loop over hosts in that shard
for ( int32_t i = 0 ; i < hpg ; i++ ) {
// get a host
Host *h = &hosts[i];
// skip if dead! no need to get a reply from dead guys
if ( g_hostdb.isDead ( h ) ) continue;
// send request to him
if ( ! us->sendRequest ( request ,
requestSize ,
0x12 , // msgType
h->m_ip ,
h->m_port ,
h->m_hostId ,
NULL , // retSlotPtrPtr
this , // state data
gotLockReplyWrapper ,
udpserver_sendrequest_infinite_timeout ) )
// udpserver returns false and sets g_errno on error
return true;
// count them
m_numRequests++;
}
// block?
if ( m_numRequests > 0 ) return false;
// did not block
return true;
}
Example 13: ask
//......... part of the code omitted here .........
char *p = m_request;
*(int64_t *) p = syncPoint ; p += 8;
//*(key_t *) p = m_startKey ; p += sizeof(key_t);
//*(key_t *) p = m_endKey ; p += sizeof(key_t);
*(int32_t *) p = m_minRecSizes ; p += 4;
*(int32_t *) p = startFileNum ; p += 4;
*(int32_t *) p = numFiles ; p += 4;
*(int32_t *) p = maxCacheAge ; p += 4;
if ( p - m_request != RDBIDOFFSET ) { char *xx=NULL;*xx=0; }
*p = m_rdbId ; p++;
*p = addToCache ; p++;
*p = doErrorCorrection; p++;
*p = includeTree ; p++;
*p = (char)niceness ; p++;
*p = (char)m_allowPageCache; p++;
KEYSET(p,m_startKey,m_ks); ; p+=m_ks;
KEYSET(p,m_endKey,m_ks); ; p+=m_ks;
// NULL terminated collection name
//strcpy ( p , coll ); p += gbstrlen ( coll ); *p++ = '\0';
*(collnum_t *)p = m_collnum; p += sizeof(collnum_t);
m_requestSize = p - m_request;
// ask an individual host for this list if hostId is NOT -1
if ( m_hostId != -1 ) {
// get Host
Host *h = g_hostdb.getHost ( m_hostId );
if ( ! h ) {
g_errno = EBADHOSTID;
log(LOG_LOGIC,"net: msg0: Bad hostId of %" PRId64".", m_hostId);
logTrace( g_conf.m_logTraceMsg0, "END, return true. Bad hostId" );
return true;
}
// if niceness is 0, use the higher priority udpServer
UdpServer *us ;
uint16_t port;
QUICKPOLL(m_niceness);
us = &g_udpServer ; port = h->m_port ;
// . returns false on error and sets g_errno, true otherwise
// . calls callback when reply is received (or error)
// . we return true if it returns false
if ( ! us->sendRequest ( m_request ,
m_requestSize ,
0x00 , // msgType
h->m_ip ,
port ,
m_hostId ,
NULL , // the slotPtr
this ,
gotSingleReplyWrapper ,
timeout ,
-1 , // backoff
-1 , // maxwait
replyBuf ,
replyBufMaxSize ,
m_niceness ) ) { // cback niceness
logTrace( g_conf.m_logTraceMsg0, "END, return true. Request sent" );
return true;
}
// return false cuz it blocked
logTrace( g_conf.m_logTraceMsg0, "END, return false. sendRequest blocked" );
return false;
}
// timing debug
if ( g_conf.m_logTimingNet )
Example 14: handleRequest22
void handleRequest22 ( UdpSlot *slot , int32_t netnice ) {
// shortcut
UdpServer *us = &g_udpServer;
// get the request
Msg22Request *r = (Msg22Request *)slot->m_readBuf;
// sanity check
int32_t requestSize = slot->m_readBufSize;
if ( requestSize < r->getMinSize() ) {
log("db: Got bad request size of %" PRId32" bytes for title record. "
"Need at least 28.", requestSize );
log(LOG_ERROR,"%s:%s:%d: call sendErrorReply.", __FILE__, __func__, __LINE__);
us->sendErrorReply ( slot , EBADREQUESTSIZE );
return;
}
// get base, returns NULL and sets g_errno to ENOCOLLREC on error
RdbBase *tbase = getRdbBase( RDB_TITLEDB, r->m_collnum );
if ( ! tbase ) {
log("db: Could not get title rec in collection # %" PRId32" because rdbbase is null.", (int32_t)r->m_collnum);
g_errno = EBADENGINEER;
log(LOG_ERROR,"%s:%s:%d: call sendErrorReply.", __FILE__, __func__, __LINE__);
us->sendErrorReply ( slot , g_errno );
return;
}
// overwrite what is in there so niceness conversion algo works
r->m_niceness = netnice;
// if just checking tfndb, do not do the cache lookup in clusterdb
if ( r->m_justCheckTfndb ) {
r->m_maxCacheAge = 0;
}
g_titledb.getRdb()->readRequestGet (requestSize);
// breathe
QUICKPOLL ( r->m_niceness);
// sanity check
if ( r->m_collnum < 0 ) { char *xx=NULL;*xx=0; }
// make the state now
State22 *st ;
try { st = new (State22); }
catch ( ... ) {
g_errno = ENOMEM;
log("query: Msg22: new(%" PRId32"): %s", (int32_t)sizeof(State22),
mstrerror(g_errno));
log(LOG_ERROR,"%s:%s:%d: call sendErrorReply.", __FILE__, __func__, __LINE__);
us->sendErrorReply ( slot , g_errno );
return;
}
mnew ( st , sizeof(State22) , "Msg22" );
// store ptr to the msg22request
st->m_r = r;
// save for sending back reply
st->m_slot = slot;
// then tell slot not to free it since m_r references it!
// so we'll have to free it when we destroy State22
st->m_slotAllocSize = slot->m_readBufMaxSize;
st->m_slotReadBuf = slot->m_readBuf;
slot->m_readBuf = NULL;
// . if docId was explicitly specified...
// . we may get multiple tfndb recs
if ( ! r->m_url[0] ) {
st->m_docId1 = r->m_docId;
st->m_docId2 = r->m_docId;
}
// but if we are requesting an available docid, it might be taken
// so try the range
if ( r->m_getAvailDocIdOnly ) {
int64_t pd = r->m_docId;
int64_t d1 = g_titledb.getFirstProbableDocId ( pd );
int64_t d2 = g_titledb.getLastProbableDocId ( pd );
// sanity - bad url with bad subdomain?
if ( pd < d1 || pd > d2 ) { char *xx=NULL;*xx=0; }
// make sure we get a decent sample in titledb then in
// case the docid we wanted is not available
st->m_docId1 = d1;
st->m_docId2 = d2;
}
// . otherwise, url was given, like from Msg15
// . we may get multiple tfndb recs
if ( r->m_url[0] ) {
int32_t dlen = 0;
// this causes ip based urls to be inconsistent with the call
// to getProbableDocId(url) below
char *dom = getDomFast ( r->m_url , &dlen );
// bogus url?
if ( ! dom ) {
log("msg22: got bad url in request: %s from "
"hostid %" PRId32" for msg22 call ",
r->m_url,slot->m_host->m_hostId);
//......... part of the code omitted here .........
Example 15: handleRequest1
// . destroys the slot if false is returned
// . this is registered in Msg1::set() to handle add rdb record msgs
// . seems like we should always send back a reply so we don't leave the
// requester's slot hanging, unless he can kill it after transmit success???
// . TODO: need we send a reply back on success????
// . NOTE: Must always call g_udpServer::sendReply or sendErrorReply() so
// read/send bufs can be freed
void handleRequest1 ( UdpSlot *slot , int32_t netnice ) {
// extract what we read
char *readBuf = slot->m_readBuf;
int32_t readBufSize = slot->m_readBufSize;
int32_t niceness = slot->m_niceness;
// select udp server based on niceness
UdpServer *us = &g_udpServer;
// must at least have an rdbId
if ( readBufSize <= 4 ) {
g_errno = EREQUESTTOOSHORT;
log(LOG_ERROR,"%s:%s:%d: call sendErrorReply. Request too short", __FILE__, __func__, __LINE__);
us->sendErrorReply ( slot , g_errno );
return;
}
char *p = readBuf;
char *pend = readBuf + readBufSize;
// extract rdbId
char rdbId = *p++;
// get the rdb to which it belongs, use Msg0::getRdb()
Rdb *rdb = getRdbFromId ( (char) rdbId );
if ( ! rdb ) {
log(LOG_ERROR,"%s:%s:%d: call sendErrorReply. Bad rdbid", __FILE__, __func__, __LINE__);
us->sendErrorReply ( slot, EBADRDBID );
return;
}
// keep track of stats
rdb->readRequestAdd ( readBufSize );
// reset g_errno
g_errno = 0;
// are we injecting some title recs?
bool injecting;
if ( *p & 0x80 ) injecting = true;
else injecting = false;
p++;
// then collection
//char *coll = p;
//p += strlen (p) + 1;
collnum_t collnum = *(collnum_t *)p;
p += sizeof(collnum_t);
// . make a list from this data
// . skip over the first 4 bytes which is the rdbId
// . TODO: embed the rdbId in the msgtype or something...
RdbList list;
// set the list
list.set ( p , // readBuf + 4 ,
pend - p , // readBufSize - 4 ,
p , // readBuf + 4 ,
pend - p , // readBufSize - 4 ,
rdb->getFixedDataSize() ,
false , // ownData?
rdb->useHalfKeys() ,
rdb->getKeySize () );
// note it
//log("msg1: handlerequest1 calling addlist niceness=%" PRId32,niceness);
//log("msg1: handleRequest1 niceness=%" PRId32,niceness);
// this returns false and sets g_errno on error
rdb->addList ( collnum , &list , niceness);
// if titledb, add tfndb recs to map the title recs
//if ( ! g_errno && rdb == g_titledb.getRdb() && injecting )
// updateTfndb ( coll , &list , true, 0);
// but if deleting a "new" and unforced record from spiderdb
// then only delete tfndb record if it was tfn=255
//if ( ! g_errno && rdb == g_spiderdb.getRdb() )
// updateTfndb2 ( coll , &list , false );
// retry on some errors
addedList ( slot , rdb );
}
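For reference, the handler above implies a simple request layout: one byte of rdbId, one byte whose high bit flags injection, a collnum_t, and then the raw RdbList data. The builder below is only a sketch inferred from that parsing code; the 16-bit collnum_t typedef and the helper name are assumptions.

#include <cstring>
#include <cstdint>

typedef int16_t collnum_t; // assumed width; the handler only relies on sizeof(collnum_t)

int32_t buildMsg1Request(char *buf, char rdbId, bool injecting,
                         collnum_t collnum,
                         const char *listData, int32_t listSize)
{
    char *p = buf;
    *p++ = rdbId;                           // which Rdb the records belong to
    *p++ = injecting ? (char)0x80 : 0;      // high bit of the flags byte marks injection
    memcpy(p, &collnum, sizeof(collnum_t)); // target collection number
    p += sizeof(collnum_t);
    memcpy(p, listData, listSize);          // the serialized RdbList records
    p += listSize;
    return (int32_t)(p - buf);              // total request size (must exceed 4 bytes)
}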