本文整理汇总了C++中DbMessage::reservedField方法的典型用法代码示例。如果您正苦于以下问题:C++ DbMessage::reservedField方法的具体用法?C++ DbMessage::reservedField怎么用?C++ DbMessage::reservedField使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DbMessage的用法示例。
在下文中一共展示了DbMessage::reservedField方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: _insert
/**
 * Shard-aware bulk insert (single-pass variant).
 *
 * Reads every document from the client message, verifies (and, when _id is
 * part of the shard key, attempts to complete) its shard key, groups the
 * documents by target chunk, then bulk-inserts each group to the owning
 * shard with bounded retries on StaleConfigException.
 *
 * @param r       client request; supplies the namespace, per-insert stats,
 *                and a refreshed chunk manager after a stale-config reset
 * @param d       wire-protocol message containing the documents to insert
 * @param manager chunk manager for the target collection (re-fetched on retry)
 * @throws UserException (8011) if a document lacks a usable shard key
 * @throws UserException (14804) if the collection is no longer sharded
 */
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
    map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards
    try {
        while ( d.moreJSObjs() ) {
            BSONObj o = d.nextJsObj();
            if ( ! manager->hasShardKey( o ) ) {
                bool bad = true;
                // If _id participates in the shard key, auto-generating a
                // missing _id may be enough to complete the key.
                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }
                if ( bad ) {
                    log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
                    uasserted( 8011 , "tried to insert object with no valid shard key" );
                }
            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);
            insertsForChunk[manager->findChunk(o)].push_back(o);
        }

        for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin(); it != insertsForChunk.end(); ++it) {
            ChunkPtr c = it->first;
            vector<BSONObj> objs = it->second;
            const int maxTries = 30;

            bool gotThrough = false;
            for ( int i=0; i<maxTries; i++ ) {
                try {
                    LOG(4) << " server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;
                    insert( c->getShard() , r.getns() , objs , flags);

                    int bytesWritten = 0;
                    for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                        r.gotInsert(); // Record the correct number of individual inserts
                        bytesWritten += (*vecIt).objsize();
                    }
                    // Only consider auto-splitting when the client allows it.
                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( bytesWritten );
                    gotThrough = true;
                    break;
                }
                catch ( StaleConfigException& e ) {
                    int logLevel = i < ( maxTries / 2 );
                    LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents because of StaleConfigException: " << e << endl;

                    // FIX: capture the stale sequence number *before* refreshing
                    // the chunk manager. Previously both "old" and "new" were
                    // read from the already-refreshed manager, so the log line
                    // always printed two identical values.
                    unsigned long long old = manager->getSequenceNumber();
                    r.reset();
                    manager = r.getChunkManager();
                    if( ! manager ) {
                        uasserted(14804, "collection no longer sharded");
                    }
                    LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                }
                sleepmillis( i * 20 ); // linear backoff between retries
            }

            assert( inShutdown() || gotThrough ); // not caught below
        }
    } catch (const UserException&){
        if (!d.moreJSObjs()){
            throw;
        }
        // Ignore and keep going. ContinueOnError is implied with sharding.
    }
}
示例2: _insert
/**
 * Shard-aware bulk insert (recursive/regrouping variant).
 *
 * Groups the remaining inserts by target chunk via _groupInserts, then
 * bulk-inserts each chunk's batch to its shard. On StaleConfigException the
 * chunk manager is refreshed and the whole operation recurses (regrouping the
 * not-yet-inserted batches) with an incremented retry count; on UserException
 * the failed chunk's batch is dropped and the loop continues with the rest.
 *
 * @param r                client request (namespace, stats, chunk manager)
 * @param d                wire-protocol message (source of the insert flags)
 * @param manager          chunk manager; refreshed on stale-config retry
 * @param insertsRemaining documents not yet assigned to a chunk batch
 * @param insertsForChunks per-chunk batches (by value: the recursion and the
 *                         erase operations must not disturb the caller's copy)
 * @param retries          recursion depth; capped at 30 (uassert 16055)
 * @throws UserException (16055/16056) on too many retries or shutdown
 * @throws UserException (14804) if the collection is no longer sharded
 */
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager, vector<BSONObj>& insertsRemaining, map<ChunkPtr, vector<BSONObj> > insertsForChunks, int retries = 0 ) {
    uassert( 16055, str::stream() << "too many retries during bulk insert, " << insertsRemaining.size() << " inserts remaining", retries < 30 );
    uassert( 16056, str::stream() << "shutting down server during bulk insert, " << insertsRemaining.size() << " inserts remaining", ! inShutdown() );

    const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.

    _groupInserts( manager, insertsRemaining, insertsForChunks );

    while( ! insertsForChunks.empty() ){
        ChunkPtr c = insertsForChunks.begin()->first;
        vector<BSONObj>& objs = insertsForChunks.begin()->second;

        const Shard& shard = c->getShard();
        const string& ns = r.getns();

        ShardConnection dbcon( shard, ns, manager );

        try {
            LOG(4) << " server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;

            // Taken from single-shard bulk insert, should not need multiple methods in future
            // insert( c->getShard() , r.getns() , objs , flags);

            // It's okay if the version is set here, an exception will be thrown if the version is incompatible
            dbcon.setVersion();
            dbcon->insert( ns , objs , flags);
            // TODO: Option for safe inserts here - can then use this for all inserts
            dbcon.done();

            int bytesWritten = 0;
            for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                r.gotInsert(); // Record the correct number of individual inserts
                bytesWritten += (*vecIt).objsize();
            }

            // TODO: The only reason we're grouping by chunks here is for auto-split, more efficient
            // to track this separately and bulk insert to shards
            if ( r.getClientInfo()->autoSplitOk() )
                c->splitIfShould( bytesWritten );
        }
        catch ( StaleConfigException& e ) {
            // Cleanup the connection
            dbcon.done();

            // Assume the inserts did *not* succeed, so we don't want to erase them
            int logLevel = retries < 2;
            LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents to chunk " << c << " because of StaleConfigException: " << e << endl;

            if( retries > 2 ){
                versionManager.forceRemoteCheckShardVersionCB( e.getns() );
            }

            // TODO: Replace with actual chunk handling code, simplify request
            r.reset();
            manager = r.getChunkManager();
            if( ! manager ) {
                // TODO : We can probably handle this better?
                uasserted( 14804, "collection no longer sharded" );
            }
            // End TODO

            // We may need to regroup at least some of our inserts since our chunk manager may have changed
            _insert( r, d, manager, insertsRemaining, insertsForChunks, retries + 1 );
            return;
        }
        catch( UserException& ){
            // Unexpected exception, so don't clean up the conn
            dbcon.kill();

            // These inserts won't be retried, as something weird happened here
            insertsForChunks.erase( insertsForChunks.begin() );

            // Throw if this is the last chunk bulk-inserted to
            if( insertsForChunks.empty() ){
                throw;
            }

            // FIX: skip the success-path erase below. The failed chunk was
            // already removed above; falling through would erase a *second*
            // entry, silently dropping the next chunk's inserts without ever
            // attempting them.
            continue;
        }

        insertsForChunks.erase( insertsForChunks.begin() );
    }
}