本文整理汇总了C++中BSONObj::getOwned方法的典型用法代码示例。如果您正苦于以下问题:C++ BSONObj::getOwned方法的具体用法?C++ BSONObj::getOwned怎么用?C++ BSONObj::getOwned使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类BSONObj的用法示例。
在下文中一共展示了BSONObj::getOwned方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: _doBalanceRound
// Performs one balancing pass: inspects every sharded collection on the config
// server and asks the balancing policy which chunks (if any) should migrate
// between shards. Candidate moves are collected into 'candidateChunks'
// (must be non-NULL).
// NOTE(review): this listing is truncated below (omission marker), so the tail
// of the function - where candidates are actually produced - is not visible here.
void Balancer::_doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks ) {
verify( candidateChunks );
//
// 1. Check whether there is any sharded collection to be balanced by querying
// the ShardsNS::collections collection
//
auto_ptr<DBClientCursor> cursor = conn.query( ShardNS::collection , BSONObj() );
vector< string > collections;
while ( cursor->more() ) {
BSONObj col = cursor->nextSafe();
// sharded collections will have a shard "key".
if ( ! col["key"].eoo() && ! col["noBalance"].trueValue() ){
collections.push_back( col["_id"].String() );
}
else if( col["noBalance"].trueValue() ){
LOG(1) << "not balancing collection " << col["_id"].String() << ", explicitly disabled" << endl;
}
}
// Release the cursor (and its server-side resources) before the next query.
cursor.reset();
if ( collections.empty() ) {
LOG(1) << "no collections to balance" << endl;
return;
}
//
// 2. Get a list of all the shards that are participating in this balance round
// along with any maximum allowed quotas and current utilization. We get the
// latter by issuing db.serverStatus() (mem.mapped) to all shards.
//
// TODO: skip unresponsive shards and mark information as stale.
//
vector<Shard> allShards;
Shard::getAllShards( allShards );
if ( allShards.size() < 2) {
LOG(1) << "can't balance without more active shards" << endl;
return;
}
// Snapshot of per-shard capacity/usage/draining state, keyed by shard name.
ShardInfoMap shardInfo;
for ( vector<Shard>::const_iterator it = allShards.begin(); it != allShards.end(); ++it ) {
const Shard& s = *it;
ShardStatus status = s.getStatus();
shardInfo[ s.getName() ] = ShardInfo( s.getMaxSize(),
status.mapped(),
s.isDraining(),
status.hasOpsQueued(),
s.tags()
);
}
//
// 3. For each collection, check if the balancing policy recommends moving anything around.
//
for (vector<string>::const_iterator it = collections.begin(); it != collections.end(); ++it ) {
const string& ns = *it;
// Group the collection's chunks by owning shard, sorted by min key.
map< string,vector<BSONObj> > shardToChunksMap;
cursor = conn.query( ShardNS::chunk , QUERY( "ns" << ns ).sort( "min" ) );
while ( cursor->more() ) {
BSONObj chunk = cursor->nextSafe();
// Jumbo chunks are too big to move; skip them entirely.
if ( chunk["jumbo"].trueValue() )
continue;
vector<BSONObj>& chunks = shardToChunksMap[chunk["shard"].String()];
// getOwned(): copy the doc out of the cursor's buffer so the stored
// BSONObj stays valid after the cursor advances or is reset.
chunks.push_back( chunk.getOwned() );
}
cursor.reset();
if (shardToChunksMap.empty()) {
LOG(1) << "skipping empty collection (" << ns << ")";
continue;
}
for ( vector<Shard>::iterator i=allShards.begin(); i!=allShards.end(); ++i ) {
// this just makes sure there is an entry in shardToChunksMap for every shard
Shard s = *i;
shardToChunksMap[s.getName()].size();
}
DistributionStatus status( shardInfo, shardToChunksMap );
// load tags
conn.ensureIndex( ShardNS::tags, BSON( "ns" << 1 << "min" << 1 ), true );
cursor = conn.query( ShardNS::tags , QUERY( "ns" << ns ).sort( "min" ) );
while ( cursor->more() ) {
BSONObj tag = cursor->nextSafe();
// Tag min/max are owned copies because they outlive the cursor batch.
uassert( 16356 , str::stream() << "tag ranges not valid for: " << ns ,
status.addTagRange( TagRange( tag["min"].Obj().getOwned(),
tag["max"].Obj().getOwned(),
tag["tag"].String() ) ) );
}
cursor.reset();
//......... part of the code is omitted here (truncated listing) .........
示例2: go
/**
 * Dumps one database: writes a <collection>.bson data file plus a
 * <collection>.metadata.json file into 'outdir' for every collection of 'db'
 * (or only for the collection named by the _coll member, when set).
 *
 * Side effects: creates 'outdir' (and parents) on disk and issues queries on
 * the source server through conn().
 */
void go( const string db , const boost::filesystem::path outdir ) {
    log() << "DATABASE: " << db << "\t to \t" << outdir.string() << endl;
    boost::filesystem::create_directories( outdir );

    map <string, BSONObj> collectionOptions;   // collection ns -> "options" subdocument
    multimap <string, BSONObj> indexes;        // collection ns -> index specs (several per ns)
    vector <string> collections;               // namespaces selected for dumping

    // Save indexes for database. Each system.indexes doc names its target
    // collection in "ns"; getOwned() detaches the object from the cursor's
    // buffer so it survives past the next batch.
    string ins = db + ".system.indexes";
    auto_ptr<DBClientCursor> cursor = conn( true ).query( ins.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
    while ( cursor->more() ) {
        BSONObj obj = cursor->nextSafe();
        const string name = obj.getField( "ns" ).valuestr();
        indexes.insert( pair<string, BSONObj> (name, obj.getOwned()) );
    }

    // Walk system.namespaces to decide which collections to dump.
    string sns = db + ".system.namespaces";
    cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
    while ( cursor->more() ) {
        BSONObj obj = cursor->nextSafe();
        const string name = obj.getField( "name" ).valuestr();
        if (obj.hasField("options")) {
            collectionOptions.insert( pair<string,BSONObj> (name, obj.getField("options").embeddedObject()) );
        }

        // skip namespaces with $ in them only if we don't specify a collection to dump
        if ( _coll == "" && name.find( ".$" ) != string::npos ) {
            log(1) << "\tskipping collection: " << name << endl;
            continue;
        }

        // if a particular collection is specified, and it's not this one, skip it
        // (previously this identical check was duplicated further down; once is enough)
        if ( _coll != "" && db + "." + _coll != name && _coll != name )
            continue;

        // Raise error before writing a collection whose name contains
        // non-permitted filename characters.
        // BUGFIX: find_first_of("/\0") used the const char* overload, which
        // stops at the embedded NUL - the null byte was never actually
        // checked. Pass the character-set length explicitly instead.
        size_t hasBadChars = name.find_first_of("/\0", 0, 2);
        if (hasBadChars != string::npos){
            error() << "Cannot dump " << name << ". Collection has '/' or null in the collection name." << endl;
            continue;
        }

        // Don't dump indexes; they are re-created from the metadata file.
        if ( endsWith(name.c_str(), ".system.indexes") ) {
            continue;
        }

        collections.push_back(name);
    }

    // Dump each selected collection: the data file first, then its metadata.
    for (vector<string>::iterator it = collections.begin(); it != collections.end(); ++it) {
        string name = *it;
        const string filename = name.substr( db.size() + 1 );  // strip the "<db>." prefix
        writeCollectionFile( name , outdir / ( filename + ".bson" ) );
        writeMetadataFile( name, outdir / (filename + ".metadata.json"), collectionOptions, indexes);
    }
}
示例3: setErrInfo
/**
 * Records the supplementary error-info document for this write error.
 * A deep copy is taken via getOwned() so the stored object no longer
 * depends on the caller's buffer; the presence flag is raised afterwards.
 */
void WriteErrorDetail::setErrInfo(const BSONObj& errInfo) {
    // Own the bytes first, then mark the field as present.
    _errInfo = errInfo.getOwned();
    _isErrInfoSet = true;
}
示例4: work
// NOTE(review): truncated fragment of a delete plan stage's work() method -
// the signature and the start of the body are above the cut, and the listing
// is cut off again inside the final catch block.
//......... part of the code is omitted here .........
}
RecordId rloc = member->loc;
// Deletes can't have projections. This means that covering analysis will always add
// a fetch. We should always get fetched data, and never just key data.
invariant(member->hasObj());
try {
// If the snapshot changed, then we have to make sure we have the latest copy of the
// doc and that it still matches.
std::unique_ptr<SeekableRecordCursor> cursor;
if (getOpCtx()->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
cursor = _collection->getCursor(getOpCtx());
if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, cursor)) {
// Doc is already deleted. Nothing more to do.
++_commonStats.needTime;
return PlanStage::NEED_TIME;
}
// Make sure the re-fetched doc still matches the predicate.
if (_params.canonicalQuery &&
!_params.canonicalQuery->root()->matchesBSON(member->obj.value(), NULL)) {
// Doesn't match.
++_commonStats.needTime;
return PlanStage::NEED_TIME;
}
}
// Ensure that the BSONObj underlying the WorkingSetMember is owned because saveState()
// is allowed to free the memory.
if (_params.returnDeleted) {
// Save a copy of the document that is about to get deleted, but keep it in the
// LOC_AND_OBJ state in case we need to retry deleting it.
BSONObj deletedDoc = member->obj.value();
member->obj.setValue(deletedDoc.getOwned());
}
// TODO: Do we want to buffer docs and delete them in a group rather than
// saving/restoring state repeatedly?
try {
if (supportsDocLocking()) {
// Doc-locking engines require this before saveState() since they don't use
// invalidations.
WorkingSetCommon::prepareForSnapshotChange(_ws);
}
child()->saveState();
} catch (const WriteConflictException& wce) {
// A write conflict while merely saving child state is unrecoverable.
std::terminate();
}
// Do the write, unless this is an explain.
if (!_params.isExplain) {
WriteUnitOfWork wunit(getOpCtx());
_collection->deleteDocument(getOpCtx(), rloc);
wunit.commit();
}
++_specificStats.docsDeleted;
} catch (const WriteConflictException& wce) {
// When we're doing a findAndModify with a sort, the sort will have a limit of 1, so will
// not produce any more results even if there is another matching document. Re-throw the WCE
// here so that these operations get another chance to find a matching document. The
// findAndModify command should automatically retry if it gets a WCE.
// TODO: this is not necessary if there was no sort specified.
if (_params.returnDeleted) {
throw;
示例5: cloneSplit
/**
 * Returns a new CollectionMetadata in which 'chunk' (which must be owned
 * exactly by this shard) has been subdivided at each key in 'splitKeys'.
 * The caller owns the returned pointer.
 *
 * Returns NULL and fills '*errMsg' (optional) when 'newShardVersion' is not
 * strictly greater than the current shard version, when the chunk is not held
 * by this shard, or when a split key falls outside the chunk's range.
 */
CollectionMetadata* CollectionMetadata::cloneSplit( const ChunkType& chunk,
                                                    const vector<BSONObj>& splitKeys,
                                                    const ChunkVersion& newShardVersion,
                                                    string* errMsg ) const {
    // The error message string is optional.
    string dummy;
    if (errMsg == NULL) {
        errMsg = &dummy;
    }

    // The version required in both resulting chunks could be simply an increment in the
    // minor portion of the current version. However, we are enforcing uniqueness over the
    // attributes <ns, version> of the configdb collection 'chunks'. So in practice, a
    // migrate somewhere may force this split to pick up a version that has the major
    // portion higher than the one that this shard has been using.
    //
    // TODO drop the uniqueness constraint and tighten the check below so that only the
    // minor portion of version changes
    if (newShardVersion <= _shardVersion) {
        *errMsg = stream() << "cannot split chunk "
                           << rangeToString( chunk.getMin(), chunk.getMax() )
                           << ", new shard version "
                           << newShardVersion.toString()
                           << " is not greater than current version "
                           << _shardVersion.toString();
        warning() << *errMsg << endl;
        return NULL;
    }

    // Check that we have the exact chunk that will be subtracted.
    if ( !rangeMapContains( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
        *errMsg = stream() << "cannot split chunk "
                           << rangeToString( chunk.getMin(), chunk.getMax() )
                           << ", this shard does not contain the chunk";
        if ( rangeMapOverlaps( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
            RangeVector overlap;
            getRangeMapOverlap( _chunksMap, chunk.getMin(), chunk.getMax(), &overlap );
            *errMsg += stream() << " and it overlaps " << overlapToString( overlap );
        }
        warning() << *errMsg << endl;
        return NULL;
    }

    // Check that every split key is valid, i.e. strictly inside the chunk.
    for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it )
    {
        if (!rangeContains(chunk.getMin(), chunk.getMax(), *it)) {
            *errMsg = stream() << "cannot split chunk "
                               << rangeToString( chunk.getMin(), chunk.getMax() ) << " at key "
                               << *it;
            warning() << *errMsg << endl;
            return NULL;
        }
    }

    auto_ptr<CollectionMetadata> metadata(new CollectionMetadata);
    // BUGFIX: the previous code assigned _keyPattern and then called
    // getOwned() on a separate line, discarding the result. getOwned()
    // returns a new owned object; it does not modify in place.
    metadata->_keyPattern = this->_keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_shardVersion = newShardVersion; // will increment 2nd, 3rd,... chunks below

    // Replace [min, max) with [min, k1), [k1, k2), ..., [kn, max). Each split
    // key terminates the range that starts at 'startKey' and opens the next.
    // BUGFIX: the previous code keyed every iteration with chunk.getMin()
    // instead of 'startKey', which overwrote the first range repeatedly and
    // produced overlapping entries whenever more than one split key was given.
    BSONObj startKey = chunk.getMin();
    for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end();
          ++it ) {
        BSONObj split = *it;
        metadata->_chunksMap[startKey] = split.getOwned();
        metadata->_chunksMap.insert( make_pair( split.getOwned(), chunk.getMax().getOwned() ) );
        metadata->_shardVersion.incMinor();
        startKey = split;
    }

    // The collection version is the max of the new shard version and the old
    // collection version.
    metadata->_collVersion =
        metadata->_shardVersion > _collVersion ? metadata->_shardVersion : _collVersion;
    metadata->fillRanges();

    dassert(metadata->isValid());
    return metadata.release();
}
示例6: runQuery
// NOTE(review): truncated fragment - the beginning of this query-execution
// function (its signature and setup of pq/jsobj/tailable/explain/etc.) is
// above the cut and not visible here.
//......... part of the code is omitted here .........
BSONObj order = pq.getOrder();
BSONObj query = pq.getFilter();
/* The ElemIter will not be happy if this isn't really an object. So throw exception
here when that is true.
(Which may indicate bad data from client.)
*/
if ( query.objsize() == 0 ) {
out() << "Bad query object?\n jsobj:";
out() << jsobj.toString() << "\n query:";
out() << query.toString() << endl;
uassert( 10110 , "bad query object", false);
}
// Tailable cursors need to read newly written entries from the tail
// of the collection. They manually arbitrate with the collection over
// what data is readable and when, so we choose read uncommited isolation.
OpSettings settings;
settings.setQueryCursorMode(DEFAULT_LOCK_CURSOR);
settings.setBulkFetch(true);
settings.setCappedAppendPK(pq.hasOption(QueryOption_AddHiddenPK));
cc().setOpSettings(settings);
// If our caller has a transaction, it's multi-statement.
const bool inMultiStatementTxn = cc().hasTxn();
if (tailable) {
// Because it's easier to disable this. It shouldn't be happening in a normal system.
uassert(16812, "May not perform a tailable query in a multi-statement transaction.",
!inMultiStatementTxn);
}
// Begin a read-only, snapshot transaction under normal circumstances.
// If the cursor is tailable, we need to be able to read uncommitted data.
const int txnFlags = (tailable ? DB_READ_UNCOMMITTED : DB_TXN_SNAPSHOT) | DB_TXN_READ_ONLY;
LOCK_REASON(lockReason, "query");
Client::ReadContext ctx(ns, lockReason);
scoped_ptr<Client::Transaction> transaction(!inMultiStatementTxn ?
new Client::Transaction(txnFlags) : NULL);
bool hasRetried = false;
// Retry loop: a QueryRetryException (in-memory sort overflow) allows exactly
// one retry; anything else propagates.
while ( 1 ) {
try {
replVerifyReadsOk(&pq);
// Fast-path for primary key queries.
if (!explain && !tailable) {
// NOTE(review): replVerifyReadsOk was already called just above; this
// second call looks redundant - confirm before removing.
replVerifyReadsOk(&pq);
if (_tryQueryByPKHack(ns, query, pq, curop, result)) {
if (transaction) {
transaction->commit();
}
return "";
}
}
// sanity check the query and projection
if (pq.getFields() != NULL) {
pq.getFields()->validateQuery( query );
}
// Tailable cursors only make sense on capped collections or the oplog,
// and only with natural ordering.
if (tailable) {
Collection *cl = getCollection( ns );
if (cl != NULL && !(cl->isCapped() || str::equals(ns, rsoplog))) {
uasserted( 13051, "tailable cursor requested on non-capped, non-oplog collection" );
}
const BSONObj nat1 = BSON( "$natural" << 1 );
if ( order.isEmpty() ) {
order = nat1;
} else {
uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == nat1 );
}
}
// Run a regular query.
// these now may stored in a ClientCursor or somewhere else,
// so make sure we use a real copy
jsobj = jsobj.getOwned();
query = query.getOwned();
order = order.getOwned();
const ConfigVersion shardingVersionAtStart = shardingState.getVersion( ns );
const bool getCachedExplainPlan = ! hasRetried && explain && ! pq.hasIndexSpecifier();
const bool savedCursor = queryWithQueryOptimizer( queryOptions, ns, jsobj, curop, query,
order, pq_shared, shardingVersionAtStart,
getCachedExplainPlan, inMultiStatementTxn,
result );
// Did not save the cursor, so we can commit the transaction now if it exists.
if (transaction && !savedCursor) {
transaction->commit();
}
return curop.debug().exhaust ? ns : "";
}
catch ( const QueryRetryException & ) {
// In some cases the query may be retried if there is an in memory sort size assertion.
verify( ! hasRetried );
hasRetried = true;
}
}
}
示例7: runParsed
// NOTE(review): truncated fragment of an aggregation-command runner - the
// signature and earlier body are above the cut, and the try block opened
// below is cut off before its end.
//......... part of the code is omitted here .........
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
auto ws = make_unique<WorkingSet>();
auto proxy = make_unique<PipelineProxyStage>(txn, pipeline, ws.get());
// Collection-less aggregations (e.g. server-status stages) get an executor
// bound to the namespace string only.
auto statusWithPlanExecutor = (NULL == collection)
? PlanExecutor::make(
txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
: PlanExecutor::make(
txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
invariant(statusWithPlanExecutor.isOK());
exec = std::move(statusWithPlanExecutor.getValue());
{
auto planSummary = Explain::getPlanSummary(exec.get());
// NOTE(review): this creates an *unnamed temporary* lock_guard which is
// destroyed immediately, so the client lock is NOT held across the next
// call; the intent was almost certainly a named guard
// (stdx::lock_guard<Client> lk(...)). Flagging, not changing, here.
stdx::lock_guard<Client>(*txn->getClient());
curOp->setPlanSummary_inlock(std::move(planSummary));
}
if (collection) {
PlanSummaryStats stats;
Explain::getSummaryStats(*exec, &stats);
collection->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
}
if (collection) {
const bool isAggCursor = true; // enable special locking behavior
// cmdObj.getOwned(): the cursor outlives this command invocation, so it
// must own its copy of the command document.
ClientCursor* cursor =
new ClientCursor(collection->getCursorManager(),
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
0,
cmdObj.getOwned(),
isAggCursor);
pin.reset(new ClientCursorPin(collection->getCursorManager(), cursor->cursorid()));
// Don't add any code between here and the start of the try block.
}
// At this point, it is safe to release the collection lock.
// - In the case where we have a collection: we will need to reacquire the
// collection lock later when cleaning up our ClientCursorPin.
// - In the case where we don't have a collection: our PlanExecutor won't be
// registered, so it will be safe to clean it up outside the lock.
invariant(!exec || !collection);
}
try {
// Unless set to true, the ClientCursor created above will be deleted on block exit.
bool keepCursor = false;
// Use of the aggregate command without specifying to use a cursor is deprecated.
// Applications should migrate to using cursors. Cursors are strictly more useful than
// outputting the results as a single document, since results that fit inside a single
// BSONObj will also fit inside a single batch.
//
// We occasionally log a deprecation warning.
if (!request.isCursorCommand()) {
RARELY {
warning()
<< "Use of the aggregate command without the 'cursor' "
"option is deprecated. See "
"http://dochub.mongodb.org/core/aggregate-without-cursor-deprecation.";
}
}
示例8: _syncDoInitialSync
/**
 * Do the initial sync for this member.
 */
// Clones all databases from a sync source, applies the oplog to catch up, and
// rebuilds state so this member can join the set as a secondary. Failures at
// each step log a heartbeat message and back off (sleepsecs) before returning
// so the caller can retry.
// NOTE(review): this listing is truncated at the end (omission marker).
void ReplSetImpl::_syncDoInitialSync() {
replset::InitialSync init(replset::BackgroundSync::get());
sethbmsg("initial sync pending",0);
// if this is the first node, it may have already become primary
if ( box.getState().primary() ) {
sethbmsg("I'm already primary, no need for initial sync",0);
return;
}
// Pick a sync source; without one we cannot proceed - retry later.
const Member *source = getMemberToSyncTo();
if (!source) {
sethbmsg("initial sync need a member to be primary or secondary to do our initial sync", 0);
sleepsecs(15);
return;
}
string sourceHostname = source->h().toString();
init.setHostname(sourceHostname);
OplogReader r;
if( !r.connect(sourceHostname) ) {
sethbmsg( str::stream() << "initial sync couldn't connect to " << source->h().toString() , 0);
sleepsecs(15);
return;
}
// Remember the source's newest oplog entry; this bounds the initial apply.
BSONObj lastOp = r.getLastOp(rsoplog);
if( lastOp.isEmpty() ) {
sethbmsg("initial sync couldn't read remote oplog", 0);
sleepsecs(15);
return;
}
if (replSettings.fastsync) {
log() << "fastsync: skipping database clone" << rsLog;
// prime oplog
init.oplogApplication(lastOp, lastOp);
return;
}
else {
sethbmsg("initial sync drop all databases", 0);
dropAllDatabasesExceptLocal();
sethbmsg("initial sync clone all databases", 0);
list<string> dbs = r.conn()->getDatabaseNames();
// First clone pass: data only (indexes are built in a second pass below).
// A failed clone vetoes this source for 10 minutes and backs off.
if ( ! _syncDoInitialSync_clone( sourceHostname.c_str(), dbs, true ) ) {
veto(source->fullName(), 600);
sleepsecs(300);
return;
}
sethbmsg("initial sync data copy, starting syncup",0);
BSONObj minValid;
if ( ! _syncDoInitialSync_applyToHead( init, &r , source , lastOp , minValid ) ) {
return;
}
lastOp = minValid;
// its currently important that lastOp is equal to the last op we actually pulled
// this is because the background thread only pulls each op once now
// so if its now, we'll be waiting forever
{
// this takes whatever the last op the we got is
// and stores it locally before we wipe it out below
Lock::DBRead lk(rsoplog);
Helpers::getLast(rsoplog, lastOp);
// getOwned(): detach from the query result buffer before the oplog is emptied.
lastOp = lastOp.getOwned();
}
// reset state, as that "didn't count"
emptyOplog();
lastOpTimeWritten = OpTime();
lastH = 0;
sethbmsg("initial sync building indexes",0);
// Second clone pass: build the indexes.
if ( ! _syncDoInitialSync_clone( sourceHostname.c_str(), dbs, false ) ) {
veto(source->fullName(), 600);
sleepsecs(300);
return;
}
}
sethbmsg("initial sync query minValid",0);
BSONObj minValid;
if ( ! _syncDoInitialSync_applyToHead( init, &r, source, lastOp, minValid ) ) {
return;
}
// ---------
sethbmsg("initial sync finishing up",0);
//......... part of the code is omitted here (truncated listing) .........
示例9: calculateConfigDiff
/**
 * Walks 'diffCursor' (a query over changed chunk documents) and folds each
 * valid diff into the tracked range map and version bookkeeping.
 *
 * Returns the number of valid diffs applied, or -1 when an unrecoverable
 * inconsistency is detected (epoch mismatch or overlapping ranges), in which
 * case the caller must reload from scratch.
 */
int ConfigDiffTracker<ValType,ShardType>::
calculateConfigDiff( DBClientCursorInterface& diffCursor )
{
    MONGO_LOG_DEFAULT_COMPONENT_LOCAL(::mongo::logger::LogComponent::kSharding);
    verifyAttached();

    // Apply the chunk changes to the ranges and versions
    //
    // Overall idea here is to work in two steps :
    // 1. For all the new chunks we find, increment the maximum version per-shard and
    //    per-collection, and remove any conflicting chunks from the ranges
    // 2. For all the new chunks we're interested in (all of them for mongos, just chunks on
    //    the shard for mongod) add them to the ranges
    //
    vector<BSONObj> newTracked;

    // Store epoch now so it doesn't change when we change max
    OID currEpoch = _maxVersion->epoch();
    _validDiffs = 0;

    while( diffCursor.more() ){
        BSONObj diffChunkDoc = diffCursor.next();
        ChunkVersion chunkVersion = ChunkVersion::fromBSON(diffChunkDoc, ChunkType::DEPRECATED_lastmod());

        // Sanity-check the document shape before trusting its fields.
        if( diffChunkDoc[ChunkType::min()].type() != Object ||
            diffChunkDoc[ChunkType::max()].type() != Object ||
            diffChunkDoc[ChunkType::shard()].type() != String )
        {
            warning() << "got invalid chunk document " << diffChunkDoc
                      << " when trying to load differing chunks" << endl;
            continue;
        }

        // A missing version or a different epoch means the collection was
        // dropped/recreated; the whole diff is meaningless, so abort.
        if( ! chunkVersion.isSet() || ! chunkVersion.hasEqualEpoch( currEpoch ) ){
            warning() << "got invalid chunk version " << chunkVersion << " in document " << diffChunkDoc
                      << " when trying to load differing chunks at version "
                      << ChunkVersion( _maxVersion->majorVersion(),
                                       _maxVersion->minorVersion(),
                                       currEpoch ) << endl;
            // Don't keep loading, since we know we'll be broken here
            return -1;
        }

        _validDiffs++;

        // Get max changed version and chunk version
        if( chunkVersion > *_maxVersion ) *_maxVersion = chunkVersion;

        // Chunk version changes
        ShardType shard = shardFor( diffChunkDoc[ChunkType::shard()].String() );
        typename map<ShardType, ChunkVersion>::iterator shardVersionIt = _maxShardVersions->find( shard );
        if( shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion ){
            (*_maxShardVersions)[ shard ] = chunkVersion;
        }

        // See if we need to remove any chunks we are currently tracking b/c of this chunk's changes
        removeOverlapping(diffChunkDoc[ChunkType::min()].Obj(),
                          diffChunkDoc[ChunkType::max()].Obj());

        // Figure out which of the new chunks we need to track
        // Important - we need to actually own this doc, in case the cursor decides to getMore or unbuffer
        if( isTracked( diffChunkDoc ) ) newTracked.push_back( diffChunkDoc.getOwned() );
    }

    LOG(3) << "found " << _validDiffs
           << " new chunks for collection " << _ns
           << " (tracking " << newTracked.size()
           << "), new version is " << *_maxVersion
           << endl;

    // Second pass: install the tracked chunks into the range map.
    // Pre-increment and a const reference avoid the per-iteration BSONObj
    // copy the previous post-increment/by-value loop performed.
    for( vector<BSONObj>::const_iterator it = newTracked.begin(); it != newTracked.end(); ++it ){
        const BSONObj& chunkDoc = *it;

        // Important - we need to make sure we actually own the min and max here
        BSONObj min = chunkDoc[ChunkType::min()].Obj().getOwned();
        BSONObj max = chunkDoc[ChunkType::max()].Obj().getOwned();

        // Invariant enforced by sharding
        // It's possible to read inconsistent state b/c of getMore() and yielding, so we want
        // to detect as early as possible.
        // TODO: This checks for overlap, we also should check for holes here iff we're tracking
        // all chunks
        if( isOverlapping( min, max ) ) return -1;

        _currMap->insert( rangeFor( chunkDoc, min, max ) );
    }

    return _validDiffs;
}
示例10: getLastError
// Gathers getLastError results from every shard touched by the previous
// operation, merging them into 'result'. The single-shard case is handled
// specially below; the multi-shard path begins at the "shards" subarray and
// is cut off by the truncated listing.
// NOTE(review): listing is truncated at the end (omission marker).
bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result , bool fromWriteBackListener ) {
set<string> * shards = getPrev();
// No shards were written to: report a null error immediately.
if ( shards->size() == 0 ) {
result.appendNull( "err" );
return true;
}
vector<WBInfo> writebacks;
// handle single server
if ( shards->size() == 1 ) {
string theShard = *(shards->begin() );
BSONObj res;
bool ok = false;
{
ShardConnection conn( theShard , "" );
try {
ok = conn->runCommand( "admin" , options , res );
}
catch( std::exception &e ) {
warning() << "could not get last error from shard " << theShard << causedBy( e ) << endl;
// Catch everything that happens here, since we need to ensure we return our connection when we're
// finished.
conn.done();
return false;
}
// getOwned(): 'res' must outlive the connection's buffers; copy before done().
res = res.getOwned();
conn.done();
}
_addWriteBack( writebacks , res );
// hit other machines just to block
for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
string temp = *i;
if ( temp == theShard )
continue;
try {
ShardConnection conn( temp , "" );
// Guarantee the connection is returned even if _addWriteBack throws.
ON_BLOCK_EXIT_OBJ( conn, &ShardConnection::done );
_addWriteBack( writebacks , conn->getLastErrorDetailed() );
}
catch( std::exception &e ){
warning() << "could not clear last error from shard " << temp << causedBy( e ) << endl;
}
}
clearSinceLastGetError();
if ( writebacks.size() ){
vector<BSONObj> v = _handleWriteBacks( writebacks , fromWriteBackListener );
if ( v.size() == 0 && fromWriteBackListener ) {
// ok
}
else {
// this will usually be 1
// it can be greater than 1 if a write to a different shard
// than the last write op had a writeback
// all we're going to report is the first
// since that's the current write
// but we block for all
verify( v.size() >= 1 );
result.appendElements( v[0] );
result.appendElementsUnique( res );
result.append( "writebackGLE" , v[0] );
result.append( "initialGLEHost" , theShard );
}
}
else {
result.append( "singleShard" , theShard );
result.appendElements( res );
}
return ok;
}
// Multi-shard path: aggregate per-shard GLE documents into a "shards" array.
BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
BSONObjBuilder shardRawGLE;
long long n = 0;
int updatedExistingStat = 0; // 0 is none, -1 has but false, 1 has true
// hit each shard
vector<string> errors;
vector<BSONObj> errorObjects;
for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
string theShard = *i;
bbb.append( theShard );
//......... part of the code is omitted here (truncated listing) .........
示例11: work
/**
 * Produces the next matching index entry for this scan.
 *
 * Returns ADVANCED with '*out' set to a WorkingSetMember in LOC_AND_IDX state
 * when an entry passes the filter; NEED_TIME when work was done but nothing
 * can be returned yet (init, dedup drop, filter miss, deferred key checks);
 * IS_EOF when the scan is exhausted.
 */
PlanStage::StageState IndexScan::work(WorkingSetID* out) {
    ++_commonStats.works;

    // Accumulate the time spent in this call into executionTimeMillis.
    ScopedTimer timer(&_commonStats.executionTimeMillis);

    // Pay back key comparisons performed in an earlier work() cycle by
    // reporting NEED_TIME now; this keeps plan ranking honest (see the
    // '_checkEndKeys' comment in the header).
    if (_checkEndKeys > 0) {
        --_checkEndKeys;
        ++_commonStats.needTime;
        return PlanStage::NEED_TIME;
    }

    if (NULL == _indexCursor.get()) {
        // First call: perform the (possibly heavy) cursor initialization.
        initIndexScan();
        checkEnd();
        ++_commonStats.needTime;
        return PlanStage::NEED_TIME;
    }
    else if (_yieldMovedCursor) {
        _yieldMovedCursor = false;
        // Recovering from the yield already advanced the cursor, so we must
        // not call next() for this entry.
    }

    if (isEOF()) {
        return PlanStage::IS_EOF;
    }

    // Current (key, location) pair from the index.
    BSONObj indexKey = _indexCursor->getKey();
    DiskLoc recLoc = _indexCursor->getValue();

    // Advance immediately so the cursor points at the *next* candidate; that
    // way a caller deleting the entry we just returned cannot clobber our
    // position.
    _indexCursor->next();
    checkEnd();

    if (_shouldDedup) {
        ++_specificStats.dupsTested;
        // insert() returns {iterator, inserted}; a false 'inserted' means we
        // already emitted this location, so drop the duplicate.
        if (!_returned.insert(recLoc).second) {
            ++_specificStats.dupsDropped;
            ++_commonStats.needTime;
            return PlanStage::NEED_TIME;
        }
    }

    if (!Filter::passes(indexKey, _keyPattern, _filter)) {
        ++_commonStats.needTime;
        return PlanStage::NEED_TIME;
    }

    if (NULL != _filter) {
        ++_specificStats.matchTested;
    }

    // Copy the key out of the index: the on-disk data may mutate while this
    // query executes.
    BSONObj ownedKeyObj = indexKey.getOwned();

    // Fill out the WSM.
    WorkingSetID id = _workingSet->allocate();
    WorkingSetMember* member = _workingSet->get(id);
    member->loc = recLoc;
    member->keyData.push_back(IndexKeyDatum(_keyPattern, ownedKeyObj));
    member->state = WorkingSetMember::LOC_AND_IDX;

    if (_params.addKeyMetadata) {
        BSONObjBuilder bob;
        bob.appendKeys(_keyPattern, ownedKeyObj);
        member->addComputed(new IndexKeyComputedData(bob.obj()));
    }

    *out = id;
    ++_commonStats.advanced;
    return PlanStage::ADVANCED;
}
示例12: run
// Implements the filemd5 command: streams a GridFS file's chunks in
// (files_id, n) order and md5-appends each chunk's data, releasing and
// reacquiring the collection lock around the expensive hashing.
// NOTE(review): listing is truncated at the end (omission marker), so the
// final result-building code is not visible here.
bool run(OperationContext* txn,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
const std::string ns = parseNs(dbname, jsobj);
md5digest d;
md5_state_t st;
md5_init(&st);
int n = 0;
bool partialOk = jsobj["partialOk"].trueValue();
if (partialOk) {
// WARNING: This code depends on the binary layout of md5_state. It will not be
// compatible with different md5 libraries or work correctly in an environment with
// mongod's of different endians. It is ok for mongos to be a different endian since
// it just passes the buffer through to another mongod.
BSONElement stateElem = jsobj["md5state"];
if (!stateElem.eoo()) {
int len;
const char* data = stateElem.binDataClean(len);
massert(16247, "md5 state not correct size", len == sizeof(st));
memcpy(&st, data, sizeof(st));
}
n = jsobj["startAt"].numberInt();
}
// Scan chunks for this file starting at chunk number n, in chunk order.
BSONObj query = BSON("files_id" << jsobj["filemd5"] << "n" << GTE << n);
BSONObj sort = BSON("files_id" << 1 << "n" << 1);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
CanonicalQuery* cq;
if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
uasserted(17240, "Can't canonicalize query " + query.toString());
return 0;
}
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
// We drop and re-acquire these locks every document because md5'ing is expensive
unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, ns));
Collection* coll = ctx->getCollection();
PlanExecutor* rawExec;
if (!getExecutor(txn,
coll,
cq,
PlanExecutor::YIELD_MANUAL,
&rawExec,
QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
uasserted(17241, "Can't get executor for query " + query.toString());
return 0;
}
unique_ptr<PlanExecutor> exec(rawExec);
// Process notifications when the lock is released/reacquired in the loop below
exec->registerExec();
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
BSONElement ne = obj["n"];
verify(ne.isNumber());
int myn = ne.numberInt();
// Chunks must arrive strictly in sequence; a gap means the missing
// chunk lives on another shard (tolerated with partialOk) or the
// file is corrupt.
if (n != myn) {
if (partialOk) {
break; // skipped chunk is probably on another shard
}
log() << "should have chunk: " << n << " have:" << myn << endl;
dumpChunks(txn, ns, query, sort);
uassert(10040, "chunks out of order", n == myn);
}
// make a copy of obj since we access data in it while yielding locks
BSONObj owned = obj.getOwned();
exec->saveState();
// UNLOCKED
ctx.reset();
int len;
const char* data = owned["data"].binDataClean(len);
// This is potentially an expensive operation, so do it out of the lock
md5_append(&st, (const md5_byte_t*)(data), len);
n++;
try {
// RELOCKED
ctx.reset(new AutoGetCollectionForRead(txn, ns));
} catch (const SendStaleConfigException& ex) {
LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
break;
}
// Have the lock again. See if we were killed.
if (!exec->restoreState(txn)) {
if (!partialOk) {
uasserted(13281, "File deleted during filemd5 command");
//......... part of the code is omitted here (truncated listing) .........
示例13: initFields
/**
 * Builds this query's projection from 'fields'. An empty projection document
 * means "no projection", in which case _fields is left unset. The spec is
 * copied with getOwned() so the Projection keeps its own buffer.
 */
void ParsedQuery::initFields( const BSONObj& fields ) {
    if ( !fields.isEmpty() ) {
        _fields.reset( new Projection() );
        _fields->init( fields.getOwned() );
    }
}
示例14: Status
// Renames one namespace within this MMAPv1 database: moves the
// NamespaceDetails entry, rewrites the system.namespaces metadata document,
// and rebuilds the in-memory collection cache entry. The statement ordering
// here is deliberate - the new .ns entry is added (and rolled back on throw)
// before the old one is killed - so the code is annotated rather than
// restructured.
// Returns BadValue when 'fromNS' is missing or 'toNS' already exists.
Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace( OperationContext* txn,
const StringData& fromNS,
const StringData& toNS,
bool stayTemp ) {
// some sanity checking
NamespaceDetails* fromDetails = _namespaceIndex.details( fromNS );
if ( !fromDetails )
return Status( ErrorCodes::BadValue, "from namespace doesn't exist" );
if ( _namespaceIndex.details( toNS ) )
return Status( ErrorCodes::BadValue, "to namespace already exists" );
// Drop any cached Entry for the source before mutating on-disk state.
_removeFromCache( fromNS );
// at this point, we haven't done anything destructive yet
// ----
// actually start moving
// ----
// this could throw, but if it does we're ok
_namespaceIndex.add_ns( txn, toNS, fromDetails );
NamespaceDetails* toDetails = _namespaceIndex.details( toNS );
try {
toDetails->copyingFrom(txn,
toNS,
_namespaceIndex,
fromDetails); // fixes extraOffset
}
catch( DBException& ) {
// could end up here if .ns is full - if so try to clean up / roll back a little
_namespaceIndex.kill_ns( txn, toNS );
throw;
}
// at this point, code .ns stuff moved
_namespaceIndex.kill_ns( txn, fromNS );
fromDetails = NULL;
// fix system.namespaces
BSONObj newSpec;
DiskLoc oldSpecLocation;
{
BSONObj oldSpec;
{
// Linear scan of the namespace record store for the old metadata doc.
RecordStoreV1Base* rs = _getNamespaceRecordStore();
scoped_ptr<RecordIterator> it( rs->getIterator(txn) );
while ( !it->isEOF() ) {
DiskLoc loc = it->getNext();
BSONObj entry = it->dataFor( loc ).toBson();
if ( fromNS == entry["name"].String() ) {
oldSpecLocation = loc;
// getOwned(): the record's memory is not stable past the iterator.
oldSpec = entry.getOwned();
break;
}
}
}
invariant( !oldSpec.isEmpty() );
invariant( !oldSpecLocation.isNull() );
// Rebuild the options document: rewrite "create" to the new name and
// drop "temp" unless the caller asked to keep it.
BSONObjBuilder b;
BSONObjIterator i( oldSpec.getObjectField( "options" ) );
while( i.more() ) {
BSONElement e = i.next();
if ( strcmp( e.fieldName(), "create" ) != 0 ) {
if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
b.append( e );
}
else {
b << "create" << toNS;
}
}
newSpec = b.obj();
}
// Insert the new metadata doc, then delete the old one.
_addNamespaceToNamespaceCollection( txn, toNS, newSpec.isEmpty() ? 0 : &newSpec );
_getNamespaceRecordStore()->deleteRecord( txn, oldSpecLocation );
// Rebuild the in-memory cache entry for the new name under the lock.
boost::mutex::scoped_lock lk( _collectionsLock );
Entry*& entry = _collections[toNS.toString()];
invariant( entry == NULL );
entry = new Entry();
_fillInEntry_inlock( txn, toNS, entry );
return Status::OK();
}
示例15: _run
bool _run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( cmdObj["replSetReconfig"].type() != Object ) {
errmsg = "no configuration specified";
return false;
}
// We might want to add the protocol version of theReplSet->config() if it exists,
// instead of just blindly adding our compiled-in CURRENT_PROTOCOL_VERSION. But for
// TokuMX 1.0 it doesn't matter.
BSONObj configObj = ReplSetConfig::addProtocolVersionIfMissing(cmdObj["replSetReconfig"].Obj());
bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
if( force && !theReplSet ) {
replSettings.reconfig = configObj.getOwned();
result.append("msg", "will try this config momentarily, try running rs.conf() again in a few seconds");
return true;
}
if ( !check(errmsg, result) ) {
return false;
}
if( !force && !theReplSet->box.getState().primary() ) {
errmsg = "replSetReconfig command must be sent to the current replica set primary.";
return false;
}
{
// just make sure we can get a write lock before doing anything else. we'll reacquire one
// later. of course it could be stuck then, but this check lowers the risk if weird things
// are up - we probably don't want a change to apply 30 minutes after the initial attempt.
time_t t = time(0);
Lock::GlobalWrite lk;
if( time(0)-t > 20 ) {
errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
return false;
}
}
try {
scoped_ptr<ReplSetConfig> newConfig
(ReplSetConfig::make(configObj, force));
log() << "replSet replSetReconfig config object parses ok, " <<
newConfig->members.size() << " members specified" << rsLog;
if( !ReplSetConfig::legalChange(theReplSet->getConfig(), *newConfig, errmsg) ) {
return false;
}
checkMembersUpForConfigChange(*newConfig, result, false);
log() << "replSet replSetReconfig [2]" << rsLog;
theReplSet->haveNewConfig(*newConfig, true);
ReplSet::startupStatusMsg.set("replSetReconfig'd");
}
catch( DBException& e ) {
log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
throw;
}
catch( string& se ) {
log() << "replSet reconfig exception: " << se << rsLog;
errmsg = se;
return false;
}
resetSlaveCache();
return true;
}