本文整理汇总了C++中conn函数的典型用法代码示例。如果您正苦于以下问题:C++ conn函数的具体用法?C++ conn怎么用?C++ conn使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了conn函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
//
// Main Routine
//
//
// Main Routine
//
// Application entry point: bring up the BLE stack with a Nordic UART
// Service (NUS), start advertising, then loop forever servicing the link.
// While a central is connected, received bytes feed the command parser;
// while idle, the two LEDs blink alternately at roughly 1 Hz.
//
int main(void) {
#ifdef DEBUG
    CSerial ser;        // UART used as the debug console
    ser.enable();
    CDebug dbg(ser);    // route the debug stream through the UART
    dbg.start();
#endif

    //
    // Bring up the SoftDevice (BLE stack)
    //
    bleDevice ble;
    ble.enable();

    // GAP configuration: device name plus connection interval 10..50 ms.
    ble.m_gap.settings(DEVICE_NAME, 10, 50);
    ble.m_gap.tx_power(BLE_TX_0dBm);    // radio output power

    // Nordic UART Service (NUS): a serial-like pipe over BLE.
    bleServiceUART nus(ble);

    // Optional helper that negotiates "connection parameters update".
    bleConnParams conn(ble);

    //
    // Advertising setup
    //
    ble.m_advertising.commpany_identifier(APP_COMPANY_IDENTIFIER); // manufacturer data
    ble.m_advertising.update();                                    // rebuild adv. payload
    ble.m_advertising.interval(APP_ADV_INTERVAL);                  // adv. interval
    ble.m_advertising.start();

    // Command parser fed from the NUS byte stream.
    cmdParse cmd;

    // Configure both LED pins as outputs.
    ledLeft.output();
    ledRight.output();

    uint8_t c = 0;  // last byte read; doubles as blink phase when idle

#ifndef DEBUG
    // Tickless idle saves power between events (kept off while debugging).
    CPowerSave::tickless(true);
#endif

    //
    // Main loop
    //
    while (1) {
        if ( ble.isConnected() ) {
            // Drain every pending byte from the UART service into the
            // command parser. read() blocks for a char, which lets the
            // tickless mode power down while we wait.
            while ( nus.readable() ) {
                c = nus.read();
                if ( c != 0 ) {
                    cmd.input(c);
                }
            }
        } else {
            // Disconnected: flip the phase, flash one LED for 10 ms,
            // then sleep 990 ms with both LEDs off to conserve power.
            c = (c == 0) ? 1 : 0;
            ledRight = (c ? LED_ON : LED_OFF);
            ledLeft = (c ? LED_OFF : LED_ON);
            sleep(10);
            ledRight = LED_OFF;
            ledLeft = LED_OFF;
            sleep(990);
        }
        // Renegotiate connection parameters when required.
        conn.negotiate();
    }
}
示例2: conn
/*
 * conn - open a TCP connection to the hard-coded server 10.96.157.84:PORTNO.
 *
 * Returns the connected socket descriptor on success, or -1 on failure
 * (any partially created socket is closed before returning).
 *
 * Fixes vs. the original:
 *  - connect() takes a generic `struct sockaddr *`; passing the
 *    `sockaddr_in *` directly is a compile error in C++ and a constraint
 *    violation in C, so an explicit cast is added.
 *  - The goto-based cleanup jumped over the initialization of `port` and
 *    `addr`, which is ill-formed in C++; the function is restructured with
 *    early returns instead.
 *  - The nested designated initializer `.sin_addr.s_addr=` is a C-only
 *    form; memset + member assignment works in both languages and also
 *    zeroes the sin_zero padding.
 */
int conn(void)
{
    int sock = socket(AF_INET, SOCK_STREAM, 0);
    if (sock == -1)
    {
        perror("socket");
        return -1;
    }

    //u_short port = (time(NULL) % 2) + PORTNO;
    u_short port = PORTNO;

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof addr);
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    addr.sin_addr.s_addr = inet_addr("10.96.157.84");

    //fprintf(stderr, "connect to port=%u\n", port);
    if (connect(sock, (struct sockaddr *)&addr, sizeof addr) == -1)
    {
        perror("connect");
        close(sock);
        return -1;
    }
    return sock;
}
int send_data(int loop, int start, const char* wstr)
{
int syserr = -1;
int sock = -1;
char* rstr = NULL;
const size_t wstr_len = strlen(wstr);
//fprintf(stderr, "wstr=[%s]\n", wstr);
sock = conn();
if (sock == -1)
{
goto EXIT_LABEL;
}
struct AppHeader_st header = { .type=1, .version=95, .payload_len=wstr_len, };
//fprintf(stderr, "sizeof(header) = %zu\n", sizeof(header));
const size_t hdrlen = sizeof(header);
ssize_t nw = write(sock, &header, hdrlen);
if (nw != (ssize_t)hdrlen)
{
perror("write header");
goto EXIT_LABEL;
}
//fprintf(stderr, "write header success\n");
nw = write(sock, wstr, wstr_len);
if (nw != (ssize_t)wstr_len)
{
perror("write payload");
goto EXIT_LABEL;
}
//fprintf(stderr, "write payload success\n");
ssize_t nr = read(sock, &header, sizeof(header));
//fprintf(stderr, "nr=%zu\n", nr);
if (nr != (ssize_t)hdrlen)
{
perror("read header");
goto EXIT_LABEL;
}
//fprintf(stderr, "read header success\n");
//fprintf(stderr, "payload len=%zu\n", header.payload_len);
if (header.payload_len != wstr_len)
{
//.........这里部分代码省略.........
示例3: fill
// One radiosity bounce for a chunk: every exposed face of every block in
// surface 's' gathers irradiance (sun / ambient / artificial channels)
// from the faces of nearby surface blocks, and the result is written into
// 'lightchunk', one entry per exposed face. 'phase' widens the searched
// neighborhood via surroundings(pos, phase + 1).
// NOTE(review): the 'data' and 'pos' parameters are not referenced in this
// body — presumably required by the lightmap-generator interface; confirm.
void radiosity_lightmap::generate(world_lightmap_access& data,
const chunk_coordinates& pos,
const surface& s, lightmap_hr &lightchunk,
unsigned int phase) const
{
// Per-direction accumulators, one slot per face of the current block.
std::array<float, 6> irr_sun, irr_ambient, irr_artificial;
vector half {0.5f, 0.5f, 0.5f};
// Output iterator: lightchunk holds one entry per exposed face, in the
// same order the faces are visited below.
auto lmi = std::begin(lightchunk);
for (const faces& f : s)
{
// Reset the accumulators for this block.
fill(irr_sun, 0.f);
fill(irr_ambient, 0.f);
fill(irr_artificial, 0.f);
// Visit every block position in the neighborhood of f.
auto surr (surroundings(f.pos, phase + 1));
for (auto sp : surr)
{
if (sp == f.pos)
continue;
// Only positions that are part of the surface can emit light.
auto found (find(s, sp));
if (found == s.end())
continue;
auto& other (*found);
for (int i = 0; i < 6; ++i)
{
// Skip faces of this block that are not exposed.
if (!f[i])
continue;
// Center of face i, pushed slightly (factor 0.52) out of the block.
vector this_face (half + f.pos + vector(dir_vector[i]) * 0.52f);
for (int j = 0; j < 6; ++j)
{
if (i == j || !other[j])
continue;
vector that_face (half + other.pos + vector(dir_vector[j])* 0.52f);
// Connecting vector between the two face centers.
vector conn (that_face - this_face);
vector norm_conn (normalize(conn));
// Both faces must point toward each other; reject back-facing or
// grazing pairs (non-positive cosine on either side).
float dp1 (dot_prod<vector>(dir_vector[j], -norm_conn));
if (dp1 <= 0)
continue;
float dp2 = dot_prod<vector>(dir_vector[i], norm_conn);
if (dp2 <= 0)
continue;
// Form-factor style transfer: product of cosines over squared distance.
float intensity = dp1 * dp2 / squared_length(conn);
// The emitter's current light values live at the same index in
// lightchunk.data as its position within the surface.
auto dist = std::distance(std::begin(s), found);
auto& olm = lightchunk.data[dist];
irr_sun[i] += olm.sunlight * intensity;
irr_ambient[i] += olm.ambient * intensity;
irr_artificial[i] += olm.artificial * intensity;
}
}
}
// Write the accumulated irradiance back for each exposed face;
// the "* 255.f + 0.49f" scales to 8-bit with round-to-nearest.
for (int i (0); i < 6; ++i)
{
if (!f[i])
continue;
lmi->r_sunlight = irr_sun[i] * 255.f + 0.49f;
lmi->r_ambient = irr_ambient[i] * 255.f + 0.49f;
lmi->r_artificial = irr_artificial[i] * 255.f + 0.49f;
++lmi;
}
}
}
示例4: appendEmptyResultSet
//.........这里部分代码省略.........
namespaces.executionNss.ns(),
shardQuery,
request.getValue().getCollation(),
&shardResults);
if (mergeCtx->isExplain) {
// This must be checked before we start modifying result.
uassertAllShardsSupportExplain(shardResults);
if (needSplit) {
*result << "needsPrimaryShardMerger" << needPrimaryShardMerger << "splitPipeline"
<< DOC("shardsPart" << shardPipeline->writeExplainOps() << "mergerPart"
<< pipeline.getValue()->writeExplainOps());
} else {
*result << "splitPipeline" << BSONNULL;
}
BSONObjBuilder shardExplains(result->subobjStart("shards"));
for (size_t i = 0; i < shardResults.size(); i++) {
shardExplains.append(shardResults[i].shardTargetId,
BSON("host" << shardResults[i].target.toString() << "stages"
<< shardResults[i].result["stages"]));
}
return Status::OK();
}
if (!needSplit) {
invariant(shardResults.size() == 1);
invariant(shardResults[0].target.getServers().size() == 1);
auto executorPool = Grid::get(txn)->getExecutorPool();
const BSONObj reply =
uassertStatusOK(storePossibleCursor(shardResults[0].target.getServers()[0],
shardResults[0].result,
namespaces.requestedNss,
executorPool->getArbitraryExecutor(),
Grid::get(txn)->getCursorManager()));
result->appendElements(reply);
return getStatusFromCommandResult(reply);
}
pipeline.getValue()->addInitialSource(
DocumentSourceMergeCursors::create(parseCursors(shardResults), mergeCtx));
MutableDocument mergeCmd(request.getValue().serializeToCommandObj());
mergeCmd["pipeline"] = Value(pipeline.getValue()->serialize());
mergeCmd["cursor"] = Value(cmdObj["cursor"]);
if (cmdObj.hasField("$queryOptions")) {
mergeCmd["$queryOptions"] = Value(cmdObj["$queryOptions"]);
}
if (cmdObj.hasField(QueryRequest::cmdOptionMaxTimeMS)) {
mergeCmd[QueryRequest::cmdOptionMaxTimeMS] =
Value(cmdObj[QueryRequest::cmdOptionMaxTimeMS]);
}
mergeCmd.setField("writeConcern", Value(cmdObj["writeConcern"]));
mergeCmd.setField("readConcern", Value(cmdObj["readConcern"]));
// If the user didn't specify a collation already, make sure there's a collation attached to
// the merge command, since the merging shard may not have the collection metadata.
if (mergeCmd.peek()["collation"].missing()) {
mergeCmd.setField("collation",
mergeCtx->getCollator()
? Value(mergeCtx->getCollator()->getSpec().toBSON())
: Value(Document{CollationSpec::kSimpleSpec}));
}
std::string outputNsOrEmpty;
if (DocumentSourceOut* out =
dynamic_cast<DocumentSourceOut*>(pipeline.getValue()->getSources().back().get())) {
outputNsOrEmpty = out->getOutputNs().ns();
}
// Run merging command on random shard, unless a stage needs the primary shard. Need to use
// ShardConnection so that the merging mongod is sent the config servers on connection init.
auto& prng = txn->getClient()->getPrng();
const auto& mergingShardId =
(needPrimaryShardMerger || internalQueryAlwaysMergeOnPrimaryShard.load())
? conf->getPrimaryId()
: shardResults[prng.nextInt32(shardResults.size())].shardTargetId;
const auto mergingShard =
uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, mergingShardId));
ShardConnection conn(mergingShard->getConnString(), outputNsOrEmpty);
BSONObj mergedResults =
aggRunCommand(txn, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
conn.done();
if (auto wcErrorElem = mergedResults["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(mergingShardId, wcErrorElem, *result);
}
// Copy output from merging (primary) shard to the output object from our command.
// Also, propagates errmsg and code if ok == false.
result->appendElementsUnique(mergedResults);
return getStatusFromCommandResult(result->asTempObj());
}
示例5: appendReplicationInfo
// Append is-master / replication status fields to 'result'.
// level == 0: basic fields; level >= 1: per-source detail from
// local.sources; level > 1: additionally contacts each source's master to
// report its first/last oplog times and compute replication lag.
void appendReplicationInfo(BSONObjBuilder& result, int level) {
// Replica-set mode: answer from the replica-set state and return early.
if ( replSet ) {
// Not yet initialized (or shunned): report not-master plus startup status.
if( theReplSet == 0 || theReplSet->state().shunned() ) {
result.append("ismaster", false);
result.append("secondary", false);
result.append("info", ReplSet::startupStatusMsg.get());
result.append( "isreplicaset" , true );
}
else {
theReplSet->fillIsMaster(result);
}
return;
}
// Legacy master/slave mode from here on.
if ( replAllDead ) {
// Replication was halted by an error; report the reason.
result.append("ismaster", 0);
string s = string("dead: ") + replAllDead;
result.append("info", s);
}
else {
result.appendBool("ismaster", _isMaster() );
}
// NOTE(review): the replSet case returned above, so this first branch
// looks unreachable — confirm whether replSet can change concurrently.
if ( level && replSet ) {
result.append( "info" , "is replica set" );
}
else if ( level ) {
// Enumerate the configured replication sources.
BSONObjBuilder sources( result.subarrayStart( "sources" ) );
int n = 0;
list<BSONObj> src;
{
// Snapshot local.sources under a read lock, releasing it before the
// network calls further down.
Client::ReadContext ctx("local.sources", storageGlobalParams.dbpath);
auto_ptr<Runner> runner(InternalPlanner::collectionScan("local.sources"));
BSONObj obj;
Runner::RunnerState state;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
src.push_back(obj);
}
}
for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
BSONObj s = *i;
BSONObjBuilder bb;
bb.append( s["host"] );
string sourcename = s["source"].valuestr();
if ( sourcename != "main" )
bb.append( s["source"] );
{
// Expand the syncedTo timestamp into its time + increment parts.
BSONElement e = s["syncedTo"];
BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
t.appendDate( "time" , e.timestampTime() );
t.append( "inc" , e.timestampInc() );
t.done();
}
if ( level > 1 ) {
// Remote calls below must not be made while holding a lock.
wassert( !Lock::isLocked() );
// note: there is no so-style timeout on this connection; perhaps we should have one.
ScopedDbConnection conn(s["host"].valuestr());
DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
if ( cliConn && replAuthenticate(cliConn) ) {
// Oldest and newest oplog entries on the master bound its window.
BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
Query().sort( BSON( "$natural" << 1 ) ) );
BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
Query().sort( BSON( "$natural" << -1 ) ) );
bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
bb.appendDate( "masterLast" , last["ts"].timestampTime() );
// Lag = master's newest oplog time minus our syncedTo, reported in seconds.
double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
bb.append( "lagSeconds" , lag / 1000 );
}
conn.done();
}
// Sources are emitted as a numerically-indexed array.
sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
}
sources.done();
}
}
示例6: conn
// Ask the primary config server for the config metadata version.
int ConfigServer::dbConfigVersion() {
    // Borrow a scoped connection to the primary, delegate to the
    // connection-taking overload, and return the pooled connection.
    ScopedDbConnection conn( _primary );
    const int ver = dbConfigVersion( conn.conn() );
    conn.done();
    return ver;
}
示例7: run
// "removeshard" command handler. Three outcomes depending on state:
//  - shard not yet draining: mark it draining and report "started";
//  - shard fully drained (no chunks, no databases): delete it from the
//    config DB and report "completed";
//  - otherwise: report "ongoing" with remaining chunk/db counts.
// Returns true on success; on failure sets 'errmsg' and returns false.
// NOTE(review): the early error returns skip conn.done() — confirm the
// ScopedDbConnection destructor cleans up on those paths.
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
// The command's first element names the target shard.
string target = cmdObj.firstElement().valuestrsafe();
Shard s = Shard::make( target );
if ( ! grid.knowAboutShard( s.getConnString() ) ) {
errmsg = "unknown shard";
return false;
}
ScopedDbConnection conn( configServer.getPrimary() );
// Only one shard may drain at a time: reject if another is draining.
if (conn->count("config.shards", BSON("_id" << NE << s.getName() << ShardFields::draining(true)))){
conn.done();
errmsg = "Can't have more than one draining shard at a time";
return false;
}
// Refuse to remove the only remaining shard.
if (conn->count("config.shards", BSON("_id" << NE << s.getName())) == 0){
conn.done();
errmsg = "Can't remove last shard";
return false;
}
// If the server is not yet draining chunks, put it in draining mode.
BSONObj searchDoc = BSON( "_id" << s.getName() );
BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
if ( shardDoc.isEmpty() ) {
// TODO prevent move chunks to this shard.
log() << "going to start draining shard: " << s.getName() << endl;
BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);
errmsg = conn->getLastError();
if ( errmsg.size() ) {
log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
// Refresh the cached shard registry after the metadata change.
Shard::reloadShardInfo();
result.append( "msg" , "draining started successfully" );
result.append( "state" , "started" );
result.append( "shard" , s.getName() );
conn.done();
return true;
}
// If the server has been completely drained, remove it from the ConfigDB.
// Check not only for chunks but also databases.
BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
long long dbCount = conn->count( "config.databases" , primaryDoc );
if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
log() << "going to remove shard: " << s.getName() << endl;
conn->remove( "config.shards" , searchDoc );
errmsg = conn->getLastError();
if ( errmsg.size() ) {
log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
// Drop the shard from the local registry and refresh.
Shard::removeShard( shardDoc[ "_id" ].str() );
Shard::reloadShardInfo();
result.append( "msg" , "removeshard completed successfully" );
result.append( "state" , "completed" );
result.append( "shard" , s.getName() );
conn.done();
return true;
}
// If the server is already in draining mode, just report on its progress.
// Report on databases (not just chunks) that are left too.
result.append( "msg" , "draining ongoing" );
result.append( "state" , "ongoing" );
BSONObjBuilder inner;
inner.append( "chunks" , chunkCount );
inner.append( "dbs" , dbCount );
result.append( "remaining" , inner.obj() );
conn.done();
return true;
}
示例8: HHVM_FN
void PDOResource::persistentSave() {
String serialized = HHVM_FN(serialize)(def_stmt_ctor_args);
conn()->serialized_def_stmt_ctor_args = serialized.toCppString();
def_stmt_ctor_args.releaseForSweep(); // we're called from requestShutdown
}
示例9: conn
// Counterpart of persistentSave(): rebuild the default-statement ctor
// args from the string stashed on the connection, if one was saved.
void PDOResource::persistentRestore() {
  auto const& saved = conn()->serialized_def_stmt_ctor_args;
  if (saved.empty()) return;
  def_stmt_ctor_args = unserialize_from_string(saved);
}
示例10: main
//
// CGI entry point: read the request (GET query string or POST body) from
// the web-server environment, hand it to regis_ter() for storage, then
// render every row of the car table fetched from the remote MySQL server
// as an HTML table.
//
// Fixes vs. the original:
//  - getenv() may return NULL (missing variable, run outside a server);
//    the unchecked strcpy() calls would crash — every lookup is guarded.
//  - REQUEST_METHOD / QUERY_STRING are copied with bounded strncpy so
//    oversized input cannot overflow the 1024-byte buffers.
//  - A hostile CONTENT_LENGTH larger than sizeof(post_data) previously
//    caused a stack buffer overflow in the read loop — now clamped.
//  - read()'s return value is checked so a short/failed stdin read
//    terminates the loop instead of spinning on stale bytes.
//
int main()
{
    int content_length = -1;
    char method[1024];
    char query_string[1024];
    char post_data[4096];
    memset(method, '\0', sizeof(method));
    memset(query_string, '\0', sizeof(query_string));
    memset(post_data, '\0', sizeof(post_data));
    std::cout<<"<html>"<<std::endl;
    std::cout<<"<head>Register The Car's Information Result</head><br/>"<<std::endl;
    std::cout<<"<body>"<<std::endl;
    const char* env = getenv("REQUEST_METHOD");
    if( env != NULL )
    {
        strncpy(method, env, sizeof(method) - 1);
    }
    if( strcasecmp("GET", method) == 0 )
    {
        env = getenv("QUERY_STRING");
        if( env != NULL )
        {
            strncpy(query_string, env, sizeof(query_string) - 1);
        }
        regis_ter(query_string);
    }
    else if( strcasecmp("POST", method) == 0 )
    {
        env = getenv("CONTENT_LENGTH");
        content_length = (env != NULL) ? atoi(env) : 0;
        // Clamp to the buffer capacity (minus the NUL terminator) so a
        // hostile CONTENT_LENGTH cannot overflow post_data.
        if( content_length < 0 )
        {
            content_length = 0;
        }
        if( content_length > (int)sizeof(post_data) - 1 )
        {
            content_length = (int)sizeof(post_data) - 1;
        }
        int i = 0;
        for(; i < content_length; i++ )
        {
            // Stop early if stdin ends or errors before CONTENT_LENGTH bytes.
            if( read(0, &post_data[i], 1) != 1 )
            {
                break;
            }
        }
        post_data[i] = '\0';
        regis_ter(post_data);
    }
    // Pull the current table contents from the remote database.
    std::string _sql_data[1024][5];
    std::string header[5];
    int curr_row = -1;
    sql_connecter conn(_remote_ip,_remote_user,_remote_passwd,_remote_db);
    conn.begin_connect();
    conn.select_sql(header,_sql_data,curr_row);
    // Emit the result: one header row plus curr_row data rows of 5 columns.
    std::cout<<"<table border=\"1\">"<<std::endl;
    std::cout<<"<tr>"<<std::endl;
    for(int i = 0;i < 5;i++)
    {
        std::cout<<"<th>"<<header[i]<<"</th>"<<std::endl;
    }
    std::cout<<"</tr>"<<std::endl;
    for(int i = 0;i < curr_row; i++)
    {
        std::cout<<"<tr>"<<std::endl;
        for(int j =0;j < 5;j++)
        {
            std::cout<<"<td>"<<_sql_data[i][j]<<"</td>"<<std::endl;
        }
        std::cout<<"</tr>"<<std::endl;
    }
    std::cout<<"</table>"<<std::endl;
    std::cout<<"</body>"<<std::endl;
    std::cout<<"</html>"<<std::endl;
    return 0;
}
示例11: assert
// Sweep hook: tear down a non-persistent PDO resource at request end.
void PDOResource::sweep() {
// Persistent connections are saved/restored instead of swept.
assert(!conn()->is_persistent);
def_stmt_ctor_args.releaseForSweep();
// Explicit destructor call only — presumably the sweepable heap reclaims
// the storage itself; confirm against the request-sweep machinery.
this->~PDOResource();
}
示例12: run
// Finish phase of a sharded map-reduce: stream every shard's partial
// output collection merged in _id order, reduce adjacent documents with
// equal keys via the JS reduce (and optional finalize) function, write
// the merged output locally, then drop the per-shard temp collections.
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string dbname = cc().database()->name; // this has to come before dbtemprelease
dbtemprelease temprelease; // we don't touch the db directly
// Name of the temporary collection each shard wrote its partials to.
string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
MRSetup mr( dbname , cmdObj.firstElement().embeddedObjectUserCheck() , false );
set<ServerAndQuery> servers;
BSONObjBuilder shardCounts;
map<string,long long> counts;
// 'shards' maps shard name -> that shard's map-reduce result document.
BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
vector< auto_ptr<DBClientCursor> > shardCursors;
BSONObjIterator i( shards );
while ( i.more() ) {
BSONElement e = i.next();
string shard = e.fieldName();
BSONObj res = e.embeddedObjectUserCheck();
// Every shard must have written to the same output collection.
uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
servers.insert( shard );
shardCounts.appendAs( res["counts"] , shard.c_str() );
// Accumulate the per-shard counters into a combined total.
BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
while ( j.more() ) {
BSONElement temp = j.next();
counts[temp.fieldName()] += temp.numberLong();
}
}
// Merge-sort all shards' partials by _id so equal keys arrive adjacently
// and can be reduced in a single pass.
BSONObj sortKey = BSON( "_id" << 1 );
ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
Query().sort( sortKey ) );
// Compile the reduce (and optional finalize) JS in a pooled scope.
auto_ptr<Scope> s = globalScriptEngine->getPooledScope( ns );
ScriptingFunction reduceFunction = s->createFunction( mr.reduceCode.c_str() );
ScriptingFunction finalizeFunction = 0;
if ( mr.finalizeCode.size() )
finalizeFunction = s->createFunction( mr.finalizeCode.c_str() );
BSONList values;
result.append( "result" , mr.finalShort );
DBDirectClient db;
// Group consecutive documents sharing the same sort key; when the key
// changes, reduce the group and insert it into the local temp collection.
while ( cursor.more() ) {
BSONObj t = cursor.next().getOwned();
if ( values.size() == 0 ) {
values.push_back( t );
continue;
}
if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
values.push_back( t );
continue;
}
// Key changed: flush the previous group.
db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
values.clear();
values.push_back( t );
}
// Flush the final group, if any.
if ( values.size() )
db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
// Move the temp collection to its final name; log the output size.
long long finalCount = mr.renameIfNeeded( db );
log(0) << " mapreducefinishcommand " << mr.finalLong << " " << finalCount << endl;
// Clean up the per-shard temporary output collections.
// NOTE(review): conn.done() is not called here — confirm the scoped
// connection's destructor returns/closes it on this path.
for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
ScopedDbConnection conn( i->_server );
conn->dropCollection( dbname + "." + shardedOutputCollection );
}
result.append( "shardCounts" , shardCounts.obj() );
// Report the combined counters.
{
BSONObjBuilder c;
for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ) {
c.append( i->first , i->second );
}
result.append( "counts" , c.obj() );
}
return 1;
}
示例13: if
bool ClientInfo::getLastError( const string& dbName,
const BSONObj& options,
BSONObjBuilder& result,
string& errmsg,
bool fromWriteBackListener)
{
scoped_ptr<TimerHolder> gleTimerHolder;
if ( ! fromWriteBackListener ) {
bool doTiming = false;
const BSONElement& e = options["w"];
if ( e.isNumber() ) {
doTiming = e.numberInt() > 1;
}
else if ( e.type() == String ) {
doTiming = true;
}
if ( doTiming ) {
gleTimerHolder.reset( new TimerHolder( &gleWtimeStats ) );
}
}
set<string> * shards = getPrev();
if ( shards->size() == 0 ) {
result.appendNull( "err" );
return true;
}
vector<WBInfo> writebacks;
//
// TODO: These branches should be collapsed into a single codepath
//
// handle single server
if ( shards->size() == 1 ) {
string theShard = *(shards->begin() );
BSONObj res;
bool ok = false;
{
LOG(5) << "gathering response for gle from: " << theShard << endl;
ShardConnection conn( theShard , "" );
try {
ok = conn->runCommand( dbName , options , res );
}
catch( std::exception &e ) {
string message =
str::stream() << "could not get last error from shard " << theShard
<< causedBy( e );
warning() << message << endl;
errmsg = message;
// Catch everything that happens here, since we need to ensure we return our connection when we're
// finished.
conn.done();
return false;
}
res = res.getOwned();
conn.done();
}
_addWriteBack( writebacks, res, true );
LOG(4) << "gathering writebacks from " << sinceLastGetError().size() << " hosts for"
<< " gle (" << theShard << ")" << endl;
// hit other machines just to block
for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
string temp = *i;
if ( temp == theShard )
continue;
LOG(5) << "gathering writebacks for single-shard gle from: " << temp << endl;
try {
ShardConnection conn( temp , "" );
ON_BLOCK_EXIT_OBJ( conn, &ShardConnection::done );
_addWriteBack( writebacks, conn->getLastErrorDetailed(), false );
}
catch( std::exception &e ){
warning() << "could not clear last error from shard " << temp << causedBy( e ) << endl;
}
}
clearSinceLastGetError();
// We never need to handle writebacks if we're coming from the wbl itself
if ( writebacks.size() && !fromWriteBackListener ){
LOG(4) << "checking " << writebacks.size() << " writebacks for"
//.........这里部分代码省略.........
示例14: reload
// Rebuild the in-memory shard registry (_lookup / _rsLookup) from the
// config server's shard collection. The special "config" entry is
// preserved across reloads because it does not originate from
// ShardNS::shard. Network I/O happens before taking _mutex.
void reload() {
list<BSONObj> all;
{
// Fetch every shard document from the config server (30s timeout).
scoped_ptr<ScopedDbConnection> conn(
ScopedDbConnection::getInternalScopedDbConnection(
configServer.getPrimary().getConnString(), 30));
auto_ptr<DBClientCursor> c = conn->get()->query(ShardType::ConfigNS , Query());
massert( 13632 , "couldn't get updated shard list from config server" , c.get() );
while ( c->more() ) {
all.push_back( c->next().getOwned() );
}
conn->done();
}
// Mutate the registry under the lock.
scoped_lock lk( _mutex );
// We use the _lookup table for all shards and for the primary config DB. The config DB info,
// however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
// the config state intact. The rationale is that this way we could drop shards that
// were removed without reinitializing the config DB information.
ShardMap::iterator i = _lookup.find( "config" );
if ( i != _lookup.end() ) {
ShardPtr config = i->second;
_lookup.clear();
_lookup[ "config" ] = config;
}
else {
_lookup.clear();
}
_rsLookup.clear();
// Recreate a Shard object per document and index it by name and host.
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); ++i ) {
BSONObj o = *i;
string name = o[ ShardType::name() ].String();
string host = o[ ShardType::host() ].String();
// maxSize and draining are optional fields; default to 0 / false.
long long maxSize = 0;
BSONElement maxSizeElem = o[ ShardType::maxSize.name() ];
if ( ! maxSizeElem.eoo() ) {
maxSize = maxSizeElem.numberLong();
}
bool isDraining = false;
BSONElement isDrainingElem = o[ ShardType::draining.name() ];
if ( ! isDrainingElem.eoo() ) {
isDraining = isDrainingElem.Bool();
}
ShardPtr s( new Shard( name , host , maxSize , isDraining ) );
// Copy any tags attached to the shard document.
if ( o[ ShardType::tags() ].type() == Array ) {
vector<BSONElement> v = o[ ShardType::tags() ].Array();
for ( unsigned j=0; j<v.size(); j++ ) {
s->addTag( v[j].String() );
}
}
_lookup[name] = s;
_installHost( host , s );
}
}
示例15: log
// Drop this database cluster-wide: config metadata, all sharded
// collections, the primary shard's copy, and finally every shard that
// ever held its data. Returns true on success; on failure fills 'errmsg'
// and returns false (several paths spell that as 'return 0').
bool DBConfig::dropDatabase( string& errmsg ) {
/**
* 1) make sure everything is up
* 2) update config server
* 3) drop and reset sharded collections
* 4) drop and reset primary
* 5) drop everywhere to clean up loose ends
*/
log() << "DBConfig::dropDatabase: " << _name << endl;
configServer.logChange( "dropDatabase.start" , _name , BSONObj() );
// 1: all config servers must be reachable before we start.
if ( ! configServer.allUp( errmsg ) ) {
log(1) << "\t DBConfig::dropDatabase not all up" << endl;
return 0;
}
// 2: forget the DB locally and delete its entry on the config server.
grid.removeDB( _name );
{
ScopedDbConnection conn( configServer.modelServer() );
conn->remove( ShardNS::database , BSON( "_id" << _name ) );
errmsg = conn->getLastError();
if ( ! errmsg.empty() ) {
log() << "could not drop '" << _name << "': " << errmsg << endl;
conn.done();
return false;
}
conn.done();
}
// Re-verify the config servers after the metadata write.
if ( ! configServer.allUp( errmsg ) ) {
log() << "error removing from config server even after checking!" << endl;
return 0;
}
log(1) << "\t removed entry from config server for: " << _name << endl;
set<Shard> allServers;
// 3: keep dropping sharded collections until a pass drops none,
// accumulating every shard that held a chunk into 'allServers'.
while ( true ) {
int num = 0;
if ( ! _dropShardedCollections( num , allServers , errmsg ) )
return 0;
log() << " DBConfig::dropDatabase: " << _name << " dropped sharded collections: " << num << endl;
if ( num == 0 )
break;
}
// 4: drop the database on its primary shard.
// NOTE(review): conn.done() is skipped on the error returns in steps 4
// and 5 — confirm the scoped connection's destructor cleans up.
{
ScopedDbConnection conn( _primary );
BSONObj res;
if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
return 0;
}
conn.done();
}
// 5: drop on every shard that ever held data, to clean up loose ends.
for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) {
ScopedDbConnection conn( *i );
BSONObj res;
if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
return 0;
}
conn.done();
}
log(1) << "\t dropped primary db for: " << _name << endl;
configServer.logChange( "dropDatabase" , _name , BSONObj() );
return true;
}