This article collects typical usage examples of the C++ ChunkManagerPtr class. If you have been wondering what ChunkManagerPtr is for and how to use it, the curated class examples here may help.
Below are 15 code examples of the ChunkManagerPtr class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
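Before the individual examples, here is a minimal sketch of the lookup pattern that recurs in nearly all of them. It assumes the legacy mongos internals the snippets use (grid, DBConfigPtr, ChunkPtr); the header paths are approximate and the helper name is invented:

// Minimal sketch of the recurring ChunkManagerPtr lookup pattern, assuming the
// legacy mongos types used throughout the examples (grid, DBConfigPtr, ChunkPtr).
// Illustrative only; these are internal headers of old mongos builds.
#include "s/grid.h"    // grid
#include "s/config.h"  // DBConfigPtr
#include "s/chunk.h"   // ChunkManagerPtr, ChunkPtr

ChunkPtr locateChunkFor( const string& ns , const BSONObj& doc ) {
    DBConfigPtr config = grid.getDBConfig( ns );       // per-database sharding metadata
    if ( ! config || ! config->isSharded( ns ) )
        return ChunkPtr();                             // unsharded: nothing to locate
    ChunkManagerPtr manager = config->getChunkManager( ns );
    return manager->findChunk( doc );                  // chunk that owns this document
}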
Example 1: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
    ShardConnection::sync();

    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ){
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ){
        errmsg = "ns not sharded. have to shard before can split";
        return false;
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ){
        find = cmdObj.getObjectField( "middle" );
        if ( find.isEmpty() ){
            errmsg = "need to specify find or middle";
            return false;
        }
    }

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr old = info->findChunk( find );

    return _split( result , errmsg , ns , info , old , cmdObj.getObjectField( "middle" ) );
}
Example 2: uassert
ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ) {
    uassert( 8042 , "db doesn't have sharding enabled" , _shardingEnabled );

    scoped_lock lk( _lock );

    CollectionInfo& ci = _collections[ns];
    uassert( 8043 , "collection already sharded" , ! ci.isSharded() );

    log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;

    // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
    // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining
    // chunks will be created as a by-product of splitting.
    ci.shard( ns , fieldsAndOrder , unique );
    ChunkManagerPtr cm = ci.getCM();
    uassert( 13449 , "collection already sharded" , (cm->numChunks() == 0) );
    cm->createFirstChunk( getPrimary() );
    _save();

    try {
        cm->maybeChunkCollection();
    }
    catch ( UserException& e ) {
        // failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state)
        log() << "couldn't chunk recently created collection: " << ns << " " << e << endl;
    }

    return cm;
}
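A hypothetical call site for the method above, for orientation; the namespace test.users, the {uid: 1} key, and the enableSharding() call order are assumptions about how this API was typically driven, not code from the source:

// Hypothetical caller of DBConfig::shardCollection; "test.users" and the
// {uid: 1} shard key are invented for illustration.
DBConfigPtr config = grid.getDBConfig( "test.users" );
config->enableSharding();                               // otherwise uassert 8042 fires
ChunkManagerPtr cm = config->shardCollection( "test.users" ,
                                              ShardKeyPattern( BSON( "uid" << 1 ) ) ,
                                              false /* unique */ );
log() << "created " << cm->numChunks() << " chunk(s)" << endl;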
Example 3: _insert
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        ChunkPtr c = manager->findChunk( o );
        log(4) << " server:" << c->getShard().toString() << " " << o << endl;
        insert( c->getShard() , r.getns() , o );

        r.gotInsert();
        c->splitIfShould( o.objsize() );
    }
}
Example 4: _update
void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( 13506 , "$atomic not supported sharded" , query["$atomic"].eoo() );
    uassert( 10201 , "invalid update" , d.moreJSObjs() );

    BSONObj toupdate = d.nextJsObj();
    BSONObj chunkFinder = query;

    bool upsert = flags & UpdateOption_Upsert;
    bool multi = flags & UpdateOption_Multi;

    uassert( 10202 , "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );

    if (upsert) {
        uassert(8012, "can't upsert something without shard key",
                (manager->hasShardKey(toupdate) ||
                 (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));

        BSONObj key = manager->getShardKey().extractKey(query);
        BSONForEach(e, key) {
            uassert(13465, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
        }
    }
    // The remainder of _update (routing the update to the owning chunk or
    // broadcasting it) is not included in this excerpt.
}
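The equality check at the end is worth isolating; a small sketch of the same idea, assuming the legacy getGtLtOp helper and BSONForEach macro behave as they do in the snippet:

// Sketch mirroring uassert 13465 above: an upsert query may only target the
// shard key with exact matches, never range operators.
bool shardKeyIsExactMatch( const BSONObj& key ) {
    BSONForEach( e , key ) {
        if ( getGtLtOp( e ) != BSONObj::Equality )
            return false;   // e.g. { uid : { $gt : 5 } } cannot pick a single chunk
    }
    return true;
}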
Example 5: doShardedIndexQuery
/**
 * Returns true if request is a query for sharded indexes.
 */
static bool doShardedIndexQuery(OperationContext* txn, Request& r, const QuerySpec& qSpec) {
    // Extract the ns field from the query, which may be embedded within the "query" or
    // "$query" field.
    auto nsField = qSpec.filter()["ns"];
    if (nsField.eoo()) {
        return false;
    }
    const NamespaceString indexNSSQuery(nsField.str());

    auto status = grid.catalogCache()->getDatabase(txn, indexNSSQuery.db().toString());
    if (!status.isOK()) {
        return false;
    }
    shared_ptr<DBConfig> config = status.getValue();
    if (!config->isSharded(indexNSSQuery.ns())) {
        return false;
    }

    // if you are querying on system.indexes, we need to make sure we go to a shard
    // that actually has chunks. This is not a perfect solution (what if you just
    // look at all indexes), but better than doing nothing.
    ShardPtr shard;
    ChunkManagerPtr cm;
    config->getChunkManagerOrPrimary(indexNSSQuery.ns(), cm, shard);
    if (cm) {
        set<ShardId> shardIds;
        cm->getAllShardIds(&shardIds);
        verify(shardIds.size() > 0);
        shard = grid.shardRegistry()->getShard(*shardIds.begin());
    }

    ShardConnection dbcon(shard->getConnString(), r.getns());
    DBClientBase& c = dbcon.conn();

    string actualServer;

    Message response;
    bool ok = c.call(r.m(), response, true, &actualServer);
    uassert(10200, "mongos: error calling db", ok);

    {
        QueryResult::View qr = response.singleData().view2ptr();
        if (qr.getResultFlags() & ResultFlag_ShardConfigStale) {
            dbcon.done();
            // Version is zero b/c this is deprecated codepath
            throw RecvStaleConfigException(r.getns(),
                                           "Strategy::doQuery",
                                           ChunkVersion(0, 0, OID()),
                                           ChunkVersion(0, 0, OID()));
        }
    }

    r.reply(response, actualServer.size() ? actualServer : c.getServerAddress());
    dbcon.done();

    return true;
}
Example 6: verify
int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks , bool secondaryThrottle ) {
    int movedCount = 0;

    for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
        const CandidateChunk& chunkInfo = *it->get();

        DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
        verify( cfg );

        ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
        verify( cm );

        ChunkPtr c = cm->findChunk( chunkInfo.chunk.min );
        if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
            // likely a split happened somewhere
            cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
            verify( cm );

            c = cm->findChunk( chunkInfo.chunk.min );
            if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
                log() << "chunk mismatch after reload, ignoring; will retry " << chunkInfo.chunk.toString() << endl;
                continue;
            }
        }

        BSONObj res;
        if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , secondaryThrottle , res ) ) {
            movedCount++;
            continue;
        }

        // the move requires acquiring the collection metadata's lock, which can fail
        log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
              << " chunk: " << chunkInfo.chunk << endl;

        if ( res["chunkTooBig"].trueValue() ) {
            // reload just to be safe
            cm = cfg->getChunkManager( chunkInfo.ns );
            verify( cm );
            c = cm->findChunk( chunkInfo.chunk.min );

            log() << "forcing a split because migrate failed for size reasons" << endl;

            res = BSONObj();
            c->singleSplit( true , res );
            log() << "forced split results: " << res << endl;

            if ( ! res["ok"].trueValue() ) {
                log() << "marking chunk as jumbo: " << c->toString() << endl;
                c->markAsJumbo();
                // we increment movedCount so we do another round right away
                movedCount++;
            }
        }
    }

    return movedCount;
}
Example 7: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    ShardConnection::sync();

    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ) {
        errmsg = "ns not sharded. have to shard before can split";
        return false;
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ) {
        find = cmdObj.getObjectField( "middle" );
        if ( find.isEmpty() ) {
            errmsg = "need to specify find or middle";
            return false;
        }
    }

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr chunk = info->findChunk( find );
    BSONObj middle = cmdObj.getObjectField( "middle" );

    assert( chunk.get() );
    log() << "splitting: " << ns << " shard: " << chunk << endl;

    BSONObj res;
    ChunkPtr p;
    if ( middle.isEmpty() ) {
        p = chunk->singleSplit( true /* force a split even if not enough data */ , res );
    }
    else {
        // sanity check if the key provided is a valid split point
        if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) {
            errmsg = "cannot split on initial or final chunk's key";
            return false;
        }

        vector<BSONObj> splitPoints;
        splitPoints.push_back( middle );
        p = chunk->multiSplit( splitPoints , res );
    }

    if ( p.get() == NULL ) {
        errmsg = "split failed";
        result.append( "cause" , res );
        return false;
    }

    return true;
}
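For reference, command documents that would reach this handler might be built like this; the namespace and shard-key values are hypothetical:

// Hypothetical command objects for the split handler above.
BSONObj autoSplit   = BSON( "split" << "test.foo" << "find"   << BSON( "x" << 42 ) );  // singleSplit path
BSONObj manualSplit = BSON( "split" << "test.foo" << "middle" << BSON( "x" << 100 ) ); // multiSplit at a chosen point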
Example 8: run
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
    ShardConnection::sync();

    Timer t;

    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ){
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ){
        errmsg = "ns not sharded. have to shard before can move a chunk";
        return false;
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ){
        errmsg = "need to specify find. see help";
        return false;
    }

    string toString = cmdObj["to"].valuestrsafe();
    if ( ! toString.size() ){
        errmsg = "you have to specify where you want to move the chunk";
        return false;
    }

    Shard to = Shard::make( toString );

    // so far, chunk size serves test purposes; it may or may not become a supported parameter
    long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
    if ( maxChunkSizeBytes == 0 ) {
        maxChunkSizeBytes = Chunk::MaxChunkSize;
    }

    tlog() << "CMD: movechunk: " << cmdObj << endl;

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr c = info->findChunk( find );
    const Shard& from = c->getShard();

    if ( from == to ){
        errmsg = "that chunk is already on that shard";
        return false;
    }

    BSONObj res;
    if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ){
        errmsg = "move failed";
        result.append( "cause" , res );
        return false;
    }

    result.append( "millis" , t.millis() );
    return true;
}
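A matching command document for this handler, again with an invented namespace and shard name:

// Hypothetical moveChunk command; "test.foo" and "shard0001" are invented.
BSONObj cmd = BSON( "movechunk" << "test.foo"
                    << "find" << BSON( "x" << 42 )
                    << "to" << "shard0001"
                    << "maxChunkSizeBytes" << 64LL * 1024 * 1024 );  // optional; 0 falls back to Chunk::MaxChunkSize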
Example 9: assert
int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
    int movedCount = 0;

    for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
        const CandidateChunk& chunkInfo = *it->get();

        DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
        assert( cfg );

        ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
        assert( cm );

        const BSONObj& chunkToMove = chunkInfo.chunk;
        ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
        if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
            // likely a split happened somewhere
            cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
            assert( cm );

            c = cm->findChunk( chunkToMove["min"].Obj() );
            if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
                log() << "chunk mismatch after reload, ignoring; will retry. cm min: "
                      << c->getMin() << " expected min: " << chunkToMove["min"].Obj() << endl;
                continue;
            }
        }

        BSONObj res;
        if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , res ) ) {
            movedCount++;
            continue;
        }

        // the move requires acquiring the collection metadata's lock, which can fail
        log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
              << " chunk: " << chunkToMove << endl;

        if ( res["chunkTooBig"].trueValue() ) {
            // reload just to be safe
            cm = cfg->getChunkManager( chunkInfo.ns );
            assert( cm );
            c = cm->findChunk( chunkToMove["min"].Obj() );

            log() << "forcing a split because migrate failed for size reasons" << endl;

            res = BSONObj();
            c->singleSplit( true , res );
            log() << "forced split results: " << res << endl;

            // TODO: if the split fails, mark as jumbo SERVER-2571
        }
    }

    return movedCount;
}
Example 10: checkShardVersion
void checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative ){
    // TODO: cache, optimize, etc...

    WriteBackListener::init( conn );

    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return;

    ShardChunkVersion version = 0;
    unsigned long long officialSequenceNumber = 0;

    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ){
        manager = conf->getChunkManager( ns , authoritative );
        officialSequenceNumber = manager->getSequenceNumber();
    }

    unsigned long long & sequenceNumber = checkShardVersionLastSequence[ make_pair(&conn,ns) ];
    if ( sequenceNumber == officialSequenceNumber )
        return;

    if ( isSharded ){
        version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
    }

    log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get()
           << endl;

    BSONObj result;
    if ( setShardVersion( conn , ns , version , authoritative , result ) ){
        // success!
        log(1) << " setShardVersion success!" << endl;
        sequenceNumber = officialSequenceNumber;
        dassert( sequenceNumber == checkShardVersionLastSequence[ make_pair(&conn,ns) ] );
        return;
    }

    log(1) << " setShardVersion failed!\n" << result << endl;

    if ( result.getBoolField( "need_authoritative" ) )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );

    if ( ! authoritative ){
        checkShardVersion( conn , ns , 1 );
        return;
    }

    log() << " setShardVersion failed: " << result << endl;
    massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );
}
Example 11: _insert
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    while ( d.moreJSObjs() ) {
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ) {

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ) {
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        // Many operations benefit from having the shard key early in the object
        o = manager->getShardKey().moveToFront(o);

        const int maxTries = 10;

        bool gotThrough = false;
        for ( int i=0; i<maxTries; i++ ) {
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                if ( r.getClientInfo()->autoSplitOk() )
                    c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& e ) {
                log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
                uassert(14804, "collection no longer sharded", manager);
            }
            sleepmillis( i * 200 );
        }

        assert( inShutdown() || gotThrough );
    }
}
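The loop above is the general recipe for absorbing StaleConfigException at the routing layer; stripped to its control flow it looks like this (routeOnce and refreshRouting are hypothetical stand-ins for the routed write and the ChunkManagerPtr refresh):

// Sketch of the stale-config retry pattern used above: refresh routing state
// and back off linearly. maxTries mirrors the constant in the example.
const int maxTries = 10;
for ( int i = 0; i < maxTries; i++ ) {
    try {
        routeOnce();        // hypothetical: one attempt at the routed write
        break;              // success: skip the backoff below
    }
    catch ( StaleConfigException& ) {
        refreshRouting();   // hypothetical: re-fetch the ChunkManagerPtr
    }
    sleepmillis( i * 200 ); // linear backoff, as in the example
}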
Example 12: doShardedIndexQuery
/**
 * Returns true if request is a query for sharded indexes.
 */
static bool doShardedIndexQuery( Request& r, const QuerySpec& qSpec ) {

    // Extract the ns field from the query, which may be embedded within the "query" or
    // "$query" field.
    string indexNSQuery(qSpec.filter()["ns"].str());
    DBConfigPtr config = grid.getDBConfig( r.getns() );

    if ( !config->isSharded( indexNSQuery )) {
        return false;
    }

    // if you are querying on system.indexes, we need to make sure we go to a shard
    // that actually has chunks. This is not a perfect solution (what if you just
    // look at all indexes), but better than doing nothing.
    ShardPtr shard;
    ChunkManagerPtr cm;
    config->getChunkManagerOrPrimary( indexNSQuery, cm, shard );
    if ( cm ) {
        set<Shard> shards;
        cm->getAllShards( shards );
        verify( shards.size() > 0 );
        shard.reset( new Shard( *shards.begin() ) );
    }

    ShardConnection dbcon( *shard , r.getns() );
    DBClientBase &c = dbcon.conn();

    string actualServer;

    Message response;
    bool ok = c.call( r.m(), response, true , &actualServer );
    uassert( 10200 , "mongos: error calling db", ok );

    {
        QueryResult *qr = (QueryResult *) response.singleData();
        if ( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
            dbcon.done();
            // Version is zero b/c this is deprecated codepath
            throw RecvStaleConfigException( r.getns(),
                                            "Strategy::doQuery",
                                            ChunkVersion( 0, 0, OID() ),
                                            ChunkVersion( 0, 0, OID() ));
        }
    }

    r.reply( response , actualServer.size() ? actualServer : c.getServerAddress() );
    dbcon.done();

    return true;
}
Example 13: lk
ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool reload ){
    scoped_lock lk( _lock );

    ChunkManagerPtr m = _shards[ns];
    if ( m && ! reload )
        return m;

    uassert( 10181 , (string)"not sharded:" + ns , _isSharded( ns ) );
    if ( m && reload )
        log() << "reloading shard info for: " << ns << endl;

    m.reset( new ChunkManager( this , ns , _sharded[ ns ].key , _sharded[ns].unique ) );
    _shards[ns] = m;
    return m;
}
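From the caller's side, the reload flag distinguishes a cached fetch from a forced refresh; a short sketch grounded in the code above (the namespace is invented):

// Both call shapes for DBConfig::getChunkManager; "test.foo" is hypothetical.
ChunkManagerPtr cached = config->getChunkManager( "test.foo" );         // served from the _shards cache
ChunkManagerPtr fresh  = config->getChunkManager( "test.foo" , true );  // rebuilds the manager and logs a reload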
Example 14: guessMergeShard
// TODO: Same limitations as other mongos metadata commands, sometimes we'll be stale here
// and fail. Need to better integrate targeting with commands.
ShardPtr guessMergeShard( const NamespaceString& nss, const BSONObj& minKey ) {

    DBConfigPtr config = grid.getDBConfig( nss.ns() );
    if ( !config->isSharded( nss ) ) {
        config->reload();
        if ( !config->isSharded( nss ) ) {
            return ShardPtr();
        }
    }

    ChunkManagerPtr manager = config->getChunkManager( nss );
    if ( !manager ) return ShardPtr();
    ChunkPtr chunk = manager->findChunkForDoc( minKey );
    if ( !chunk ) return ShardPtr();
    return ShardPtr( new Shard( chunk->getShard() ) );
}
Example 15: _delete
void _delete( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    int flags = d.pullInt();
    bool justOne = flags & 1;

    uassert( 10203 , "bad delete message" , d.moreJSObjs() );
    BSONObj pattern = d.nextJsObj();

    vector<shared_ptr<ChunkRange> > chunks;
    manager->getChunksForQuery( chunks , pattern );
    log(2) << "delete : " << pattern << " \t " << chunks.size() << " justOne: " << justOne << endl;

    if ( chunks.size() == 1 ){
        doWrite( dbDelete , r , chunks[0]->getShard() );
        return;
    }

    if ( justOne && ! pattern.hasField( "_id" ) )
        throw UserException( 8015 , "can only delete with a non-shard key pattern if can delete as many as we find" );

    set<Shard> seen;
    for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
        shared_ptr<ChunkRange> c = *i;
        if ( seen.count( c->getShard() ) )
            continue;
        seen.insert( c->getShard() );
        doWrite( dbDelete , r , c->getShard() );
    }
}